From ed350626bcec4c767ceafe67f41329d87b318b3a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Javier=20J=2E=20Salmer=C3=B3n-Garc=C3=ADa?= Date: Fri, 7 Jul 2023 17:16:55 +0200 Subject: [PATCH] =?UTF-8?q?[bitnami/milvus]=20feat:=20=F0=9F=8E=89=20Add?= =?UTF-8?q?=20chart=20(#17425)?= MIME-Version: 1.0 Content-Type: text/plain; charset=UTF-8 Content-Transfer-Encoding: 8bit * [bitnami/milvus] feat: :tada: Add chart Signed-off-by: Javier Salmeron Garcia * chore: :wrench: Add to cd-pipeline Signed-off-by: Javier Salmeron Garcia * docs: :memo: Update documentation Signed-off-by: Javier Salmeron Garcia * test: :white_check_mark: Change health check endpoint Signed-off-by: Javier Salmeron Garcia * test: :white_check_mark: Update Attu endpoint Signed-off-by: Javier Salmeron Garcia --------- Signed-off-by: Javier Salmeron Garcia --- .github/workflows/cd-pipeline.yml | 1 + .vib/milvus/cypress/cypress.json | 9 + .../cypress/cypress/fixtures/collection.json | 8 + .../cypress/integration/milvus_spec.js | 45 + .../cypress/cypress/support/commands.js | 44 + .vib/milvus/cypress/cypress/support/index.js | 25 + .vib/milvus/cypress/cypress/support/utils.js | 8 + .vib/milvus/goss/goss.yaml | 102 + .vib/milvus/runtime-parameters.yaml | 103 + .vib/milvus/vib-publish.json | 88 + .vib/milvus/vib-verify.json | 71 + bitnami/milvus/.helmignore | 21 + bitnami/milvus/Chart.lock | 15 + bitnami/milvus/Chart.yaml | 42 + bitnami/milvus/README.md | 1613 ++++ bitnami/milvus/templates/NOTES.txt | 115 + bitnami/milvus/templates/_helpers.tpl | 1065 +++ bitnami/milvus/templates/attu/deployment.yaml | 179 + bitnami/milvus/templates/attu/hpa.yaml | 59 + .../templates/attu/ingress-tls-secret.yaml | 54 + bitnami/milvus/templates/attu/ingress.yaml | 69 + .../milvus/templates/attu/networkpolicy.yaml | 85 + bitnami/milvus/templates/attu/pdb.yaml | 32 + .../templates/attu/service-account.yaml | 26 + bitnami/milvus/templates/attu/service.yaml | 63 + bitnami/milvus/templates/attu/vpa.yaml | 51 + 
bitnami/milvus/templates/configmap.yaml | 23 + .../templates/data-coordinator/configmap.yaml | 23 + .../data-coordinator/deployment.yaml | 208 + .../data-coordinator/extra-configmap.yaml | 24 + .../templates/data-coordinator/hpa.yaml | 59 + .../data-coordinator/networkpolicy.yaml | 91 + .../templates/data-coordinator/pdb.yaml | 32 + .../data-coordinator/service-account.yaml | 26 + .../templates/data-coordinator/service.yaml | 77 + .../data-coordinator/servicemonitor.yaml | 61 + .../templates/data-coordinator/vpa.yaml | 51 + .../milvus/templates/data-node/configmap.yaml | 23 + .../templates/data-node/deployment.yaml | 208 + .../templates/data-node/extra-configmap.yaml | 24 + bitnami/milvus/templates/data-node/hpa.yaml | 59 + .../templates/data-node/networkpolicy.yaml | 88 + bitnami/milvus/templates/data-node/pdb.yaml | 32 + .../templates/data-node/service-account.yaml | 26 + .../milvus/templates/data-node/service.yaml | 77 + .../templates/data-node/servicemonitor.yaml | 61 + bitnami/milvus/templates/data-node/vpa.yaml | 51 + .../milvus/templates/externaletcd-secret.yaml | 23 + .../templates/externalkafka-secret.yaml | 23 + .../milvus/templates/externals3-secret.yaml | 24 + bitnami/milvus/templates/extra-configmap.yaml | 23 + bitnami/milvus/templates/extra-list.yaml | 9 + .../index-coordinator/configmap.yaml | 23 + .../index-coordinator/deployment.yaml | 208 + .../index-coordinator/extra-configmap.yaml | 24 + .../templates/index-coordinator/hpa.yaml | 59 + .../index-coordinator/networkpolicy.yaml | 88 + .../templates/index-coordinator/pdb.yaml | 32 + .../index-coordinator/service-account.yaml | 26 + .../templates/index-coordinator/service.yaml | 77 + .../index-coordinator/servicemonitor.yaml | 61 + .../templates/index-coordinator/vpa.yaml | 51 + .../templates/index-node/configmap.yaml | 23 + .../templates/index-node/deployment.yaml | 208 + .../templates/index-node/extra-configmap.yaml | 24 + bitnami/milvus/templates/index-node/hpa.yaml | 59 + 
.../templates/index-node/networkpolicy.yaml | 88 + bitnami/milvus/templates/index-node/pdb.yaml | 32 + .../templates/index-node/service-account.yaml | 26 + .../milvus/templates/index-node/service.yaml | 77 + .../templates/index-node/servicemonitor.yaml | 61 + bitnami/milvus/templates/index-node/vpa.yaml | 51 + bitnami/milvus/templates/init-job.yaml | 118 + bitnami/milvus/templates/proxy/configmap.yaml | 23 + .../milvus/templates/proxy/deployment.yaml | 210 + .../templates/proxy/extra-configmap.yaml | 24 + bitnami/milvus/templates/proxy/hpa.yaml | 59 + .../milvus/templates/proxy/networkpolicy.yaml | 88 + bitnami/milvus/templates/proxy/pdb.yaml | 32 + .../templates/proxy/service-account.yaml | 26 + bitnami/milvus/templates/proxy/service.yaml | 77 + .../templates/proxy/servicemonitor.yaml | 61 + bitnami/milvus/templates/proxy/vpa.yaml | 51 + .../query-coordinator/configmap.yaml | 23 + .../query-coordinator/deployment.yaml | 208 + .../query-coordinator/extra-configmap.yaml | 24 + .../templates/query-coordinator/hpa.yaml | 59 + .../query-coordinator/networkpolicy.yaml | 88 + .../templates/query-coordinator/pdb.yaml | 32 + .../query-coordinator/service-account.yaml | 26 + .../templates/query-coordinator/service.yaml | 77 + .../query-coordinator/servicemonitor.yaml | 61 + .../templates/query-coordinator/vpa.yaml | 51 + .../templates/query-node/configmap.yaml | 23 + .../templates/query-node/deployment.yaml | 208 + .../templates/query-node/extra-configmap.yaml | 24 + bitnami/milvus/templates/query-node/hpa.yaml | 59 + .../templates/query-node/networkpolicy.yaml | 88 + bitnami/milvus/templates/query-node/pdb.yaml | 32 + .../templates/query-node/service-account.yaml | 26 + .../milvus/templates/query-node/service.yaml | 77 + .../templates/query-node/servicemonitor.yaml | 61 + bitnami/milvus/templates/query-node/vpa.yaml | 51 + .../templates/root-coordinator/configmap.yaml | 23 + .../root-coordinator/deployment.yaml | 208 + .../root-coordinator/extra-configmap.yaml | 24 + 
.../templates/root-coordinator/hpa.yaml | 59 + .../root-coordinator/networkpolicy.yaml | 88 + .../templates/root-coordinator/pdb.yaml | 32 + .../root-coordinator/service-account.yaml | 26 + .../templates/root-coordinator/service.yaml | 77 + .../root-coordinator/servicemonitor.yaml | 61 + .../templates/root-coordinator/vpa.yaml | 51 + bitnami/milvus/templates/secret.yaml | 24 + bitnami/milvus/values.schema.json | 7487 +++++++++++++++++ bitnami/milvus/values.yaml | 4982 +++++++++++ 116 files changed, 21926 insertions(+) create mode 100644 .vib/milvus/cypress/cypress.json create mode 100644 .vib/milvus/cypress/cypress/fixtures/collection.json create mode 100644 .vib/milvus/cypress/cypress/integration/milvus_spec.js create mode 100644 .vib/milvus/cypress/cypress/support/commands.js create mode 100644 .vib/milvus/cypress/cypress/support/index.js create mode 100644 .vib/milvus/cypress/cypress/support/utils.js create mode 100644 .vib/milvus/goss/goss.yaml create mode 100644 .vib/milvus/runtime-parameters.yaml create mode 100644 .vib/milvus/vib-publish.json create mode 100644 .vib/milvus/vib-verify.json create mode 100644 bitnami/milvus/.helmignore create mode 100644 bitnami/milvus/Chart.lock create mode 100644 bitnami/milvus/Chart.yaml create mode 100644 bitnami/milvus/README.md create mode 100644 bitnami/milvus/templates/NOTES.txt create mode 100644 bitnami/milvus/templates/_helpers.tpl create mode 100644 bitnami/milvus/templates/attu/deployment.yaml create mode 100644 bitnami/milvus/templates/attu/hpa.yaml create mode 100644 bitnami/milvus/templates/attu/ingress-tls-secret.yaml create mode 100644 bitnami/milvus/templates/attu/ingress.yaml create mode 100644 bitnami/milvus/templates/attu/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/attu/pdb.yaml create mode 100644 bitnami/milvus/templates/attu/service-account.yaml create mode 100644 bitnami/milvus/templates/attu/service.yaml create mode 100644 bitnami/milvus/templates/attu/vpa.yaml create mode 100644 
bitnami/milvus/templates/configmap.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/configmap.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/deployment.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/hpa.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/pdb.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/service-account.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/service.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/data-coordinator/vpa.yaml create mode 100644 bitnami/milvus/templates/data-node/configmap.yaml create mode 100644 bitnami/milvus/templates/data-node/deployment.yaml create mode 100644 bitnami/milvus/templates/data-node/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/data-node/hpa.yaml create mode 100644 bitnami/milvus/templates/data-node/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/data-node/pdb.yaml create mode 100644 bitnami/milvus/templates/data-node/service-account.yaml create mode 100644 bitnami/milvus/templates/data-node/service.yaml create mode 100644 bitnami/milvus/templates/data-node/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/data-node/vpa.yaml create mode 100644 bitnami/milvus/templates/externaletcd-secret.yaml create mode 100644 bitnami/milvus/templates/externalkafka-secret.yaml create mode 100644 bitnami/milvus/templates/externals3-secret.yaml create mode 100644 bitnami/milvus/templates/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/extra-list.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/configmap.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/deployment.yaml create mode 100644 
bitnami/milvus/templates/index-coordinator/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/hpa.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/pdb.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/service-account.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/service.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/index-coordinator/vpa.yaml create mode 100644 bitnami/milvus/templates/index-node/configmap.yaml create mode 100644 bitnami/milvus/templates/index-node/deployment.yaml create mode 100644 bitnami/milvus/templates/index-node/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/index-node/hpa.yaml create mode 100644 bitnami/milvus/templates/index-node/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/index-node/pdb.yaml create mode 100644 bitnami/milvus/templates/index-node/service-account.yaml create mode 100644 bitnami/milvus/templates/index-node/service.yaml create mode 100644 bitnami/milvus/templates/index-node/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/index-node/vpa.yaml create mode 100644 bitnami/milvus/templates/init-job.yaml create mode 100644 bitnami/milvus/templates/proxy/configmap.yaml create mode 100644 bitnami/milvus/templates/proxy/deployment.yaml create mode 100644 bitnami/milvus/templates/proxy/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/proxy/hpa.yaml create mode 100644 bitnami/milvus/templates/proxy/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/proxy/pdb.yaml create mode 100644 bitnami/milvus/templates/proxy/service-account.yaml create mode 100644 bitnami/milvus/templates/proxy/service.yaml create mode 100644 bitnami/milvus/templates/proxy/servicemonitor.yaml create mode 100644 
bitnami/milvus/templates/proxy/vpa.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/configmap.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/deployment.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/hpa.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/pdb.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/service-account.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/service.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/query-coordinator/vpa.yaml create mode 100644 bitnami/milvus/templates/query-node/configmap.yaml create mode 100644 bitnami/milvus/templates/query-node/deployment.yaml create mode 100644 bitnami/milvus/templates/query-node/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/query-node/hpa.yaml create mode 100644 bitnami/milvus/templates/query-node/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/query-node/pdb.yaml create mode 100644 bitnami/milvus/templates/query-node/service-account.yaml create mode 100644 bitnami/milvus/templates/query-node/service.yaml create mode 100644 bitnami/milvus/templates/query-node/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/query-node/vpa.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/configmap.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/deployment.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/extra-configmap.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/hpa.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/networkpolicy.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/pdb.yaml create mode 100644 
bitnami/milvus/templates/root-coordinator/service-account.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/service.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/servicemonitor.yaml create mode 100644 bitnami/milvus/templates/root-coordinator/vpa.yaml create mode 100644 bitnami/milvus/templates/secret.yaml create mode 100644 bitnami/milvus/values.schema.json create mode 100644 bitnami/milvus/values.yaml diff --git a/.github/workflows/cd-pipeline.yml b/.github/workflows/cd-pipeline.yml index 479ad60b63..604bbc12c9 100644 --- a/.github/workflows/cd-pipeline.yml +++ b/.github/workflows/cd-pipeline.yml @@ -67,6 +67,7 @@ on: # rebuild any PRs and main branch changes - 'bitnami/memcached/**' - 'bitnami/metallb/**' - 'bitnami/metrics-server/**' + - 'bitnami/milvus/**' - 'bitnami/minio/**' - 'bitnami/mongodb/**' - 'bitnami/mongodb-sharded/**' diff --git a/.vib/milvus/cypress/cypress.json b/.vib/milvus/cypress/cypress.json new file mode 100644 index 0000000000..97f78944f1 --- /dev/null +++ b/.vib/milvus/cypress/cypress.json @@ -0,0 +1,9 @@ +{ + "baseUrl": "http://localhost", + "env": { + "username": "root", + "password": "ComplicatedPassword123!4", + "host": "milvus-proxy:80" + }, + "defaultCommandTimeout": 30000 +} diff --git a/.vib/milvus/cypress/cypress/fixtures/collection.json b/.vib/milvus/cypress/cypress/fixtures/collection.json new file mode 100644 index 0000000000..088dab9bf8 --- /dev/null +++ b/.vib/milvus/cypress/cypress/fixtures/collection.json @@ -0,0 +1,8 @@ +{ + "collection": { + "name": "vib_col", + "idName": "vib_id", + "vectorName": "vib_vector", + "nlist": 2 + } +} diff --git a/.vib/milvus/cypress/cypress/integration/milvus_spec.js b/.vib/milvus/cypress/cypress/integration/milvus_spec.js new file mode 100644 index 0000000000..ef48c9e9bd --- /dev/null +++ b/.vib/milvus/cypress/cypress/integration/milvus_spec.js @@ -0,0 +1,45 @@ +/* + * Copyright VMware, Inc. 
+ * SPDX-License-Identifier: APACHE-2.0 + */ + +/// +import { + random, +} from '../support/utils'; + +it('allows to create a collection', () => { + cy.login(); + // Go to the collections page + cy.visit('#/collections'); + cy.get('button').contains('Create Collection').click({force: true}); + // Create a collection + cy.fixture('collection').then((c) => { + cy.get('[data-cy="collection_name"]').type(`${c.collection.name}${random}`); + cy.get('[data-cy="collection_name"]').type(`${c.collection.name}${random}`); + cy.get('div[class*="MuiTextField"]').contains('div', 'ID Name').within(() => { + cy.get('input').type(`${c.collection.idName}${random}`) + }); + cy.get('div[class*="MuiTextField"]').contains('div','Vector Name').within(() => { + cy.get('input').type(`${c.collection.vectorName}${random}{enter}`); + }); + cy.visit('/'); + cy.contains('Overview'); + cy.visit('#/collections'); + cy.get(`[href$="${c.collection.name}${random}"]`).click(); + cy.get('[role="button"]').contains('Create Index').click({force: true}); + cy.get('[type="number"]').type(`${c.collection.nlist}{enter}`); + // Return to the collections page + cy.visit('/'); + cy.contains('Overview'); + cy.visit('#/collections'); + cy.get(`[href$="${c.collection.name}${random}"]`).trigger('mouseover'); + // Load sample data (we use first as the newest element is the first of the list) + cy.get('[aria-label="load"]').first().click(); + cy.get('button').contains('Load').click({force: true}); + cy.get(`[href$="${c.collection.name}${random}"]`).trigger('mouseover'); + cy.get('[aria-label*="Import"]').first().click({force: true}); + cy.get('button').contains('Import').click({force: true}); + cy.contains('loaded'); + }); +}); diff --git a/.vib/milvus/cypress/cypress/support/commands.js b/.vib/milvus/cypress/cypress/support/commands.js new file mode 100644 index 0000000000..af48ef9757 --- /dev/null +++ b/.vib/milvus/cypress/cypress/support/commands.js @@ -0,0 +1,44 @@ +/* + * Copyright VMware, Inc. 
+ * SPDX-License-Identifier: APACHE-2.0 + */ + +const COMMAND_DELAY = 2000; + +for (const command of ['click']) { + Cypress.Commands.overwrite(command, (originalFn, ...args) => { + const origVal = originalFn(...args); + + return new Promise((resolve) => { + setTimeout(() => { + resolve(origVal); + }, COMMAND_DELAY); + }); + }); +} + +Cypress.Commands.add( + 'login', + (username = Cypress.env('username'), password = Cypress.env('password'), + host = Cypress.env('host')) => { + cy.visit('/?#/connect'); + cy.get('[data-cy="address"]').should('be.enabled').clear({force: true}).type(host); + cy.get('[data-cy="username"]').should('be.enabled').type(username); + cy.get('[data-cy="password"]').should('be.enabled').type(`${password}{enter}`); + cy.contains('Overview'); + } +); + +Cypress.on('uncaught:exception', (err) => { + if (err.message.includes('Cannot read properties of')) { + return false; + } + // We expect an error "Failed to execute 'observe' on 'IntersectionObserver'" + // during the installation of a template so we add an exception + if (err.message.includes("Failed to execute 'observe' on 'IntersectionObserver'")) { + return false; + } + + // we still want to ensure there are no other unexpected + // errors, so we let them fail the test +}) diff --git a/.vib/milvus/cypress/cypress/support/index.js b/.vib/milvus/cypress/cypress/support/index.js new file mode 100644 index 0000000000..fbd682739a --- /dev/null +++ b/.vib/milvus/cypress/cypress/support/index.js @@ -0,0 +1,25 @@ +/* + * Copyright VMware, Inc. + * SPDX-License-Identifier: APACHE-2.0 + */ + +// *********************************************************** +// This example support/index.js is processed and +// loaded automatically before your test files. +// +// This is a great place to put global configuration and +// behavior that modifies Cypress. +// +// You can change the location of this file or turn off +// automatically serving support files with the +// 'supportFile' configuration option. 
+// +// You can read more here: +// https://on.cypress.io/configuration +// *********************************************************** + +// Import commands.js using ES2015 syntax: +import './commands'; + +// Alternatively you can use CommonJS syntax: +// require('./commands') diff --git a/.vib/milvus/cypress/cypress/support/utils.js b/.vib/milvus/cypress/cypress/support/utils.js new file mode 100644 index 0000000000..3963b09ba2 --- /dev/null +++ b/.vib/milvus/cypress/cypress/support/utils.js @@ -0,0 +1,8 @@ +/* + * Copyright VMware, Inc. + * SPDX-License-Identifier: APACHE-2.0 + */ + +/// + +export let random = (Math.random() + 1).toString(36).substring(7); diff --git a/.vib/milvus/goss/goss.yaml b/.vib/milvus/goss/goss.yaml new file mode 100644 index 0000000000..b36c337245 --- /dev/null +++ b/.vib/milvus/goss/goss.yaml @@ -0,0 +1,102 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +http: + http://milvus-data-coordinator:{{ .Vars.dataCoord.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-query-coordinator:{{ .Vars.queryCoord.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-root-coordinator:{{ .Vars.rootCoord.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-index-coordinator:{{ .Vars.indexCoord.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-data-node:{{ .Vars.dataNode.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-query-node:{{ .Vars.queryNode.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-index-node:{{ .Vars.indexNode.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-proxy:{{ .Vars.proxy.service.ports.metrics }}/metrics: + status: 200 + body: + - milvus_num_node + http://milvus-attu:{{ .Vars.attu.service.ports.http }}: + status: 200 + 
http://127.0.0.1:{{ .Vars.proxy.containerPorts.metrics }}/metrics: + status: 200 + body: + - milvus_num_node +addr: + tcp://milvus-data-coordinator:{{ .Vars.dataCoord.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-query-coordinator:{{ .Vars.queryCoord.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-root-coordinator:{{ .Vars.rootCoord.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-index-coordinator:{{ .Vars.indexCoord.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-data-node:{{ .Vars.dataNode.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-query-node:{{ .Vars.queryNode.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-index-node:{{ .Vars.indexNode.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://milvus-proxy:{{ .Vars.proxy.service.ports.grpc }}: + reachable: true + timeout: 500 + tcp://127.0.0.1:{{ .Vars.proxy.containerPorts.grpc }}: + reachable: true + timeout: 500 +file: + /opt/bitnami/milvus/configs/milvus.yaml: + exists: true + filetype: file + mode: '0644' + contains: + - /timeTickInterval.*{{ .Vars.proxy.extraConfig.proxy.timeTickInterval }}/ + - /gracefulStopTimeout.*{{ .Vars.milvus.extraConfig.common.gracefulStopTimeout }}/ +command: + {{- $uid := .Vars.proxy.containerSecurityContext.runAsUser }} + {{- $gid := .Vars.proxy.podSecurityContext.fsGroup }} + check-user-info: + # The UID and GID should always be either the one specified as vars (always a bigger number than the def + +# Milvus packaged by Bitnami + +Milvus is a cloud-native, open-source vector database solution for AI applications and similarity search. Features high scalability, hybrid search and unified lambda structure. + +[Overview of Milvus](https://milvus.io/) + +Trademarks: This software listing is packaged by Bitnami. 
The respective trademarks mentioned in the offering are owned by the respective companies, and use of them does not imply any affiliation or endorsement. + +## TL;DR + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/milvus +``` + +## Introduction + +Bitnami charts for Helm are carefully engineered, actively maintained and are the quickest and easiest way to deploy containers on a Kubernetes cluster that are ready to handle production workloads. + +This chart bootstraps a [Milvus](https://github.com/milvus-io/milvus) Deployment in a [Kubernetes](https://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.dev/) for deployment and management of Helm Charts in clusters. + +[Learn more about the default configuration of the chart](https://docs.bitnami.com/kubernetes/infrastructure/milvus/get-started/). + +Looking to use Milvus in production? Try [VMware Application Catalog](https://bitnami.com/enterprise), the enterprise edition of Bitnami Application Catalog. + +## Prerequisites + +- Kubernetes 1.19+ +- Helm 3.2.0+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm install my-release oci://registry-1.docker.io/bitnamicharts/milvus +``` + +The command deploys Milvus on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +### Global parameters + +| Name | Description | Value | +| ------------------------- | ----------------------------------------------- | ----- | +| `global.imageRegistry` | Global Docker image registry | `""` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` | +| `global.storageClass` | Global StorageClass for Persistent Volume(s) | `""` | + +### Common parameters + +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `nameOverride` | String to partially override common.names.fullname | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `clusterDomain` | Kubernetes cluster domain name | `cluster.local` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the deployments/statefulsets | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the deployments/statefulsets | `["infinity"]` | + +### Common Milvus Parameters + +| Name | Description | Value | +| ----------------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------------------------- | --------------------- | +| `milvus.image.registry` | Milvus image registry | `docker.io` | +| `milvus.image.repository` | Milvus image repository | `bitnami/milvus` | +| `milvus.image.tag` | Milvus image tag (immutable tags are recommended) | 
`2.2.10-debian-11-r3` | +| `milvus.image.digest` | Milvus image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `milvus.image.pullPolicy` | Milvus image pull policy | `IfNotPresent` | +| `milvus.image.pullSecrets` | Milvus image pull secrets | `[]` | +| `milvus.image.debug` | Enable debug mode | `false` | +| `milvus.auth.enabled` | enable Milvus authentication | `false` | +| `milvus.auth.username` | Milvus username | `user` | +| `milvus.auth.password` | Milvus username password | `""` | +| `milvus.auth.rootPassword` | Milvus root password | `""` | +| `milvus.auth.existingSecret` | Name of a secret containing the Milvus password | `""` | +| `milvus.defaultConfig` | Milvus components default configuration | `""` | +| `milvus.extraConfig` | Extra configuration parameters | `{}` | +| `milvus.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `milvus.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `initJob.forceRun` | Force the run of the credential job | `false` | +| `initJob.image.registry` | PyMilvus image registry | `docker.io` | +| `initJob.image.repository` | PyMilvus image repository | `bitnami/pymilvus` | +| `initJob.image.tag` | PyMilvus image tag (immutable tags are recommended) | `2.2.13-debian-11-r1` | +| `initJob.image.digest` | PyMilvus image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag image tag (immutable tags are recommended) | `""` | +| `initJob.image.pullPolicy` | PyMilvus image pull policy | `IfNotPresent` | +| `initJob.image.pullSecrets` | PyMilvus image pull secrets | `[]` | +| `initJob.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `initJob.backoffLimit` | set backoff limit of the job | `10` | +| `initJob.extraVolumes` | Optionally specify extra list of additional volumes for the credential init job | `[]` | +| `initJob.extraCommands` | Extra commands to pass to the generation job | `""` | +| `initJob.containerSecurityContext.enabled` | Enabled credential init job containers' Security Context | `true` | +| `initJob.containerSecurityContext.runAsUser` | Set credential init job containers' Security Context runAsUser | `1001` | +| `initJob.containerSecurityContext.runAsNonRoot` | Set credential init job containers' Security Context runAsNonRoot | `true` | +| `initJob.containerSecurityContext.readOnlyRootFilesystem` | Set credential init job containers' Security Context runAsNonRoot | `true` | +| `initJob.containerSecurityContext.allowPrivilegeEscalation` | Set container's privilege escalation | `false` | +| `initJob.containerSecurityContext.capabilities.drop` | Set container's Security Context runAsNonRoot | `["ALL"]` | +| `initJob.podSecurityContext.enabled` | Enabled credential init job pods' Security Context | `true` | +| `initJob.podSecurityContext.fsGroup` | Set credential init job pod's Security Context fsGroup | `1001` | +| `initJob.podSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `initJob.extraEnvVars` | Array containing extra env vars to configure the credential init job | `[]` | +| `initJob.extraEnvVarsCM` | ConfigMap containing extra env vars to configure the credential init job | `""` | +| `initJob.extraEnvVarsSecret` | Secret containing extra env vars to configure the credential 
init job (in case of sensitive data) | `""` | +| `initJob.extraVolumeMounts` | Array of extra volume mounts to be added to the jwt Container (evaluated as template). Normally used with `extraVolumes`. | `[]` | +| `initJob.resources.limits` | The resources limits for the container | `{}` | +| `initJob.resources.requests` | The requested resources for the container | `{}` | +| `initJob.hostAliases` | Add deployment host aliases | `[]` | +| `initJob.annotations` | Add annotations to the job | `{}` | +| `initJob.podLabels` | Additional pod labels | `{}` | +| `initJob.podAnnotations` | Additional pod annotations | `{}` | + +### Data Coordinator Deployment Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- | +| `dataCoord.enabled` | Enable Data Coordinator deployment | `true` | +| `dataCoord.extraEnvVars` | Array with extra environment variables to add to data coordinator nodes | `[]` | +| `dataCoord.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data coordinator nodes | `""` | +| `dataCoord.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data coordinator nodes | `""` | +| `dataCoord.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `dataCoord.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `dataCoord.extraConfig` | Override configuration | `{}` | +| `dataCoord.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `dataCoord.command` | Override default container command (useful when using custom images) | `[]` | +| `dataCoord.args` | Override default container args (useful when using custom images) | `[]` | +| `dataCoord.replicaCount` | Number of Data Coordinator 
replicas to deploy | `1` | +| `dataCoord.containerPorts.grpc` | GRPC port for Data Coordinator | `19530` | +| `dataCoord.containerPorts.metrics` | Metrics port for Data Coordinator | `9091` | +| `dataCoord.livenessProbe.enabled` | Enable livenessProbe on Data Coordinator nodes | `true` | +| `dataCoord.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `dataCoord.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `dataCoord.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `dataCoord.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `dataCoord.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `dataCoord.readinessProbe.enabled` | Enable readinessProbe on Data Coordinator nodes | `true` | +| `dataCoord.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `dataCoord.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `dataCoord.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `dataCoord.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `dataCoord.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `dataCoord.startupProbe.enabled` | Enable startupProbe on Data Coordinator containers | `false` | +| `dataCoord.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `dataCoord.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `dataCoord.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `dataCoord.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `dataCoord.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `dataCoord.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `dataCoord.customReadinessProbe` | 
Custom readinessProbe that overrides the default one | `{}` | +| `dataCoord.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `dataCoord.resources.limits` | The resources limits for the data coordinator containers | `{}` | +| `dataCoord.resources.requests` | The requested resources for the data coordinator containers | `{}` | +| `dataCoord.podSecurityContext.enabled` | Enabled Data Coordinator pods' Security Context | `true` | +| `dataCoord.podSecurityContext.fsGroup` | Set Data Coordinator pod's Security Context fsGroup | `1001` | +| `dataCoord.podSecurityContext.seccompProfile.type` | Set Data Coordinator container's Security Context seccomp profile | `RuntimeDefault` | +| `dataCoord.containerSecurityContext.enabled` | Enabled Data Coordinator containers' Security Context | `true` | +| `dataCoord.containerSecurityContext.runAsUser` | Set Data Coordinator containers' Security Context runAsUser | `1001` | +| `dataCoord.containerSecurityContext.runAsNonRoot` | Set Data Coordinator containers' Security Context runAsNonRoot | `true` | +| `dataCoord.containerSecurityContext.readOnlyRootFilesystem` | Set Data Coordinator containers' Security Context readOnlyRootFilesystem | `true` | +| `dataCoord.containerSecurityContext.allowPrivilegeEscalation` | Set Data Coordinator container's privilege escalation | `false` | +| `dataCoord.containerSecurityContext.capabilities.drop` | Set Data Coordinator container's Security Context capabilities to drop | `["ALL"]` | +| `dataCoord.lifecycleHooks` | for the data coordinator container(s) to automate configuration before or after startup | `{}` | +| `dataCoord.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `dataCoord.hostAliases` | data coordinator pods host aliases | `[]` | +| `dataCoord.podLabels` | Extra labels for data coordinator pods | `{}` | +| `dataCoord.podAnnotations` | Annotations for data coordinator pods | `{}` | +| `dataCoord.podAffinityPreset` | Pod affinity preset. 
Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dataCoord.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `dataCoord.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dataCoord.nodeAffinityPreset.key` | Node label key to match. Ignored if `data coordinator.affinity` is set | `""` | +| `dataCoord.nodeAffinityPreset.values` | Node label values to match. Ignored if `data coordinator.affinity` is set | `[]` | +| `dataCoord.affinity` | Affinity for Data Coordinator pods assignment | `{}` | +| `dataCoord.nodeSelector` | Node labels for Data Coordinator pods assignment | `{}` | +| `dataCoord.tolerations` | Tolerations for Data Coordinator pods assignment | `[]` | +| `dataCoord.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `dataCoord.priorityClassName` | Data Coordinator pods' priorityClassName | `""` | +| `dataCoord.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `dataCoord.updateStrategy.type` | Data Coordinator statefulset strategy type | `RollingUpdate` | +| `dataCoord.updateStrategy.rollingUpdate` | Data Coordinator statefulset rolling update configuration parameters | `{}` | +| `dataCoord.extraVolumes` | Optionally specify extra list of additional volumes for the Data Coordinator pod(s) | `[]` | +| `dataCoord.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Data Coordinator container(s) | `[]` | +| `dataCoord.sidecars` | Add additional sidecar containers to the Data Coordinator pod(s) | `[]` | +| `dataCoord.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `dataCoord.initContainers` | Add additional init containers to the Data Coordinator pod(s) | `[]` | +| 
`dataCoord.serviceAccount.create` | Enable creation of ServiceAccount for Data Coordinator pods | `false` | +| `dataCoord.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `dataCoord.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `dataCoord.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `dataCoord.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `dataCoord.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `dataCoord.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Data Coordinator Autoscaling configuration + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `dataCoord.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `dataCoord.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `dataCoord.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `dataCoord.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `dataCoord.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `dataCoord.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `dataCoord.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `dataCoord.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `dataCoord.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `dataCoord.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `dataCoord.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `dataCoord.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Data Coordinator Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `dataCoord.service.type` | Data Coordinator service type | `ClusterIP` | +| `dataCoord.service.ports.grpc` | Data Coordinator GRPC service port | `19530` | +| `dataCoord.service.ports.metrics` | Data Coordinator Metrics service port | `9091` | +| `dataCoord.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `dataCoord.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `dataCoord.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `dataCoord.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `dataCoord.service.clusterIP` | Data Coordinator service Cluster IP | `""` | +| `dataCoord.service.loadBalancerIP` | Data Coordinator service Load Balancer IP | `""` | +| 
`dataCoord.service.loadBalancerSourceRanges` | Data Coordinator service Load Balancer sources | `[]` | +| `dataCoord.service.externalTrafficPolicy` | Data Coordinator service external traffic policy | `Cluster` | +| `dataCoord.service.annotations` | Additional custom annotations for Data Coordinator service | `{}` | +| `dataCoord.service.extraPorts` | Extra ports to expose in the Data Coordinator service | `[]` | +| `dataCoord.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `dataCoord.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `dataCoord.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `dataCoord.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `dataCoord.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `dataCoord.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Data Coordinator Metrics Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `dataCoord.metrics.enabled` | Enable metrics | `false` | +| `dataCoord.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `dataCoord.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `dataCoord.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `dataCoord.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `dataCoord.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `dataCoord.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `dataCoord.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `dataCoord.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `dataCoord.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `dataCoord.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `dataCoord.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `dataCoord.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Root Coordinator Deployment Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- | +| `rootCoord.enabled` | Enable Root Coordinator deployment | `true` | +| `rootCoord.extraEnvVars` | Array with extra environment variables to add to data coordinator nodes | `[]` | +| `rootCoord.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data coordinator nodes | `""` | +| `rootCoord.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data coordinator nodes | `""` | +| `rootCoord.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `rootCoord.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `rootCoord.extraConfig` | Override configuration | `{}` | +| `rootCoord.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `rootCoord.command` | Override 
default container command (useful when using custom images) | `[]` | +| `rootCoord.args` | Override default container args (useful when using custom images) | `[]` | +| `rootCoord.replicaCount` | Number of Root Coordinator replicas to deploy | `1` | +| `rootCoord.containerPorts.grpc` | GRPC port for Root Coordinator | `19530` | +| `rootCoord.containerPorts.metrics` | Metrics port for Root Coordinator | `9091` | +| `rootCoord.livenessProbe.enabled` | Enable livenessProbe on Root Coordinator nodes | `true` | +| `rootCoord.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `rootCoord.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `rootCoord.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `rootCoord.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `rootCoord.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `rootCoord.readinessProbe.enabled` | Enable readinessProbe on Root Coordinator nodes | `true` | +| `rootCoord.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `rootCoord.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `rootCoord.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `rootCoord.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `rootCoord.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `rootCoord.startupProbe.enabled` | Enable startupProbe on Root Coordinator containers | `false` | +| `rootCoord.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `rootCoord.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `rootCoord.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `rootCoord.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| 
`rootCoord.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `rootCoord.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `rootCoord.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `rootCoord.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `rootCoord.resources.limits` | The resources limits for the root coordinator containers | `{}` | +| `rootCoord.resources.requests` | The requested resources for the root coordinator containers | `{}` | +| `rootCoord.podSecurityContext.enabled` | Enabled Root Coordinator pods' Security Context | `true` | +| `rootCoord.podSecurityContext.fsGroup` | Set Root Coordinator pod's Security Context fsGroup | `1001` | +| `rootCoord.podSecurityContext.seccompProfile.type` | Set Root Coordinator container's Security Context seccomp profile | `RuntimeDefault` | +| `rootCoord.containerSecurityContext.enabled` | Enabled Root Coordinator containers' Security Context | `true` | +| `rootCoord.containerSecurityContext.runAsUser` | Set Root Coordinator containers' Security Context runAsUser | `1001` | +| `rootCoord.containerSecurityContext.runAsNonRoot` | Set Root Coordinator containers' Security Context runAsNonRoot | `true` | +| `rootCoord.containerSecurityContext.readOnlyRootFilesystem` | Set Root Coordinator containers' Security Context readOnlyRootFilesystem | `true` | +| `rootCoord.containerSecurityContext.allowPrivilegeEscalation` | Set Root Coordinator container's privilege escalation | `false` | +| `rootCoord.containerSecurityContext.capabilities.drop` | Set Root Coordinator container's Security Context capabilities to drop | `["ALL"]` | +| `rootCoord.lifecycleHooks` | for the root coordinator container(s) to automate configuration before or after startup | `{}` | +| `rootCoord.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `rootCoord.hostAliases` | root coordinator pods host aliases | 
`[]` | +| `rootCoord.podLabels` | Extra labels for data coordinator pods | `{}` | +| `rootCoord.podAnnotations` | Annotations for data coordinator pods | `{}` | +| `rootCoord.podAffinityPreset` | Pod affinity preset. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `rootCoord.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `rootCoord.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `rootCoord.nodeAffinityPreset.key` | Node label key to match. Ignored if `data coordinator.affinity` is set | `""` | +| `rootCoord.nodeAffinityPreset.values` | Node label values to match. Ignored if `data coordinator.affinity` is set | `[]` | +| `rootCoord.affinity` | Affinity for Root Coordinator pods assignment | `{}` | +| `rootCoord.nodeSelector` | Node labels for Root Coordinator pods assignment | `{}` | +| `rootCoord.tolerations` | Tolerations for Root Coordinator pods assignment | `[]` | +| `rootCoord.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `rootCoord.priorityClassName` | Root Coordinator pods' priorityClassName | `""` | +| `rootCoord.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `rootCoord.updateStrategy.type` | Root Coordinator statefulset strategy type | `RollingUpdate` | +| `rootCoord.updateStrategy.rollingUpdate` | Root Coordinator statefulset rolling update configuration parameters | `{}` | +| `rootCoord.extraVolumes` | Optionally specify extra list of additional volumes for the Root Coordinator pod(s) | `[]` | +| `rootCoord.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Root Coordinator container(s) | `[]` | +| `rootCoord.sidecars` | Add additional sidecar containers to the Root Coordinator 
pod(s) | `[]` | +| `rootCoord.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `rootCoord.initContainers` | Add additional init containers to the Root Coordinator pod(s) | `[]` | +| `rootCoord.serviceAccount.create` | Enable creation of ServiceAccount for Root Coordinator pods | `false` | +| `rootCoord.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `rootCoord.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `rootCoord.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rootCoord.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `rootCoord.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `rootCoord.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Root Coordinator Autoscaling configuration + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `rootCoord.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `rootCoord.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `rootCoord.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `rootCoord.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `rootCoord.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `rootCoord.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `rootCoord.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `rootCoord.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `rootCoord.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `rootCoord.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `rootCoord.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `rootCoord.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Root Coordinator Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `rootCoord.service.type` | Root Coordinator service type | `ClusterIP` | +| `rootCoord.service.ports.grpc` | Root Coordinator GRPC service port | `19530` | +| `rootCoord.service.ports.metrics` | Root Coordinator Metrics service port | `9091` | +| `rootCoord.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `rootCoord.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `rootCoord.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `rootCoord.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `rootCoord.service.clusterIP` | Root Coordinator service Cluster IP | `""` | +| `rootCoord.service.loadBalancerIP` | Root Coordinator service Load Balancer IP | `""` | +| 
`rootCoord.service.loadBalancerSourceRanges` | Root Coordinator service Load Balancer sources | `[]` | +| `rootCoord.service.externalTrafficPolicy` | Root Coordinator service external traffic policy | `Cluster` | +| `rootCoord.service.annotations` | Additional custom annotations for Root Coordinator service | `{}` | +| `rootCoord.service.extraPorts` | Extra ports to expose in the Root Coordinator service | `[]` | +| `rootCoord.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `rootCoord.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `rootCoord.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `rootCoord.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `rootCoord.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `rootCoord.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Root Coordinator Metrics Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `rootCoord.metrics.enabled` | Enable metrics | `false` | +| `rootCoord.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `rootCoord.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `rootCoord.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `rootCoord.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `rootCoord.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `rootCoord.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `rootCoord.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `rootCoord.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `rootCoord.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `rootCoord.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `rootCoord.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `rootCoord.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Query Coordinator Deployment Parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- | +| `queryCoord.enabled` | Enable Query Coordinator deployment | `true` | +| `queryCoord.extraEnvVars` | Array with extra environment variables to add to data coordinator nodes | `[]` | +| `queryCoord.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data coordinator nodes | `""` | +| `queryCoord.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data coordinator nodes | `""` | +| `queryCoord.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `queryCoord.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `queryCoord.extraConfig` | Override configuration | `{}` | +| `queryCoord.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `queryCoord.command` 
| Override default container command (useful when using custom images) | `[]` | +| `queryCoord.args` | Override default container args (useful when using custom images) | `[]` | +| `queryCoord.replicaCount` | Number of Query Coordinator replicas to deploy | `1` | +| `queryCoord.containerPorts.grpc` | GRPC port for Query Coordinator | `19530` | +| `queryCoord.containerPorts.metrics` | Metrics port for Query Coordinator | `9091` | +| `queryCoord.livenessProbe.enabled` | Enable livenessProbe on Query Coordinator nodes | `true` | +| `queryCoord.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `queryCoord.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `queryCoord.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `queryCoord.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `queryCoord.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `queryCoord.readinessProbe.enabled` | Enable readinessProbe on Query Coordinator nodes | `true` | +| `queryCoord.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `queryCoord.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `queryCoord.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `queryCoord.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `queryCoord.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `queryCoord.startupProbe.enabled` | Enable startupProbe on Query Coordinator containers | `false` | +| `queryCoord.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `queryCoord.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `queryCoord.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `queryCoord.startupProbe.failureThreshold` | Failure 
threshold for startupProbe | `5` | +| `queryCoord.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `queryCoord.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `queryCoord.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `queryCoord.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `queryCoord.resources.limits` | The resources limits for the query coordinator containers | `{}` | +| `queryCoord.resources.requests` | The requested resources for the query coordinator containers | `{}` | +| `queryCoord.podSecurityContext.enabled` | Enabled Query Coordinator pods' Security Context | `true` | +| `queryCoord.podSecurityContext.fsGroup` | Set Query Coordinator pod's Security Context fsGroup | `1001` | +| `queryCoord.podSecurityContext.seccompProfile.type` | Set Query Coordinator container's Security Context seccomp profile | `RuntimeDefault` | +| `queryCoord.containerSecurityContext.enabled` | Enabled Query Coordinator containers' Security Context | `true` | +| `queryCoord.containerSecurityContext.runAsUser` | Set Query Coordinator containers' Security Context runAsUser | `1001` | +| `queryCoord.containerSecurityContext.runAsNonRoot` | Set Query Coordinator containers' Security Context runAsNonRoot | `true` | +| `queryCoord.containerSecurityContext.readOnlyRootFilesystem` | Set Query Coordinator containers' Security Context readOnlyRootFilesystem | `true` | +| `queryCoord.containerSecurityContext.allowPrivilegeEscalation` | Set Query Coordinator container's privilege escalation | `false` | +| `queryCoord.containerSecurityContext.capabilities.drop` | Set Query Coordinator container's Security Context capabilities to drop | `["ALL"]` | +| `queryCoord.lifecycleHooks` | for the query coordinator container(s) to automate configuration before or after startup | `{}` | +| `queryCoord.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | 
+| `queryCoord.hostAliases` | data coordinator pods host aliases | `[]` | +| `queryCoord.podLabels` | Extra labels for data coordinator pods | `{}` | +| `queryCoord.podAnnotations` | Annotations for data coordinator pods | `{}` | +| `queryCoord.podAffinityPreset` | Pod affinity preset. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `queryCoord.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `queryCoord.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `queryCoord.nodeAffinityPreset.key` | Node label key to match. Ignored if `data coordinator.affinity` is set | `""` | +| `queryCoord.nodeAffinityPreset.values` | Node label values to match. Ignored if `data coordinator.affinity` is set | `[]` | +| `queryCoord.affinity` | Affinity for Query Coordinator pods assignment | `{}` | +| `queryCoord.nodeSelector` | Node labels for Query Coordinator pods assignment | `{}` | +| `queryCoord.tolerations` | Tolerations for Query Coordinator pods assignment | `[]` | +| `queryCoord.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `queryCoord.priorityClassName` | Query Coordinator pods' priorityClassName | `""` | +| `queryCoord.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `queryCoord.updateStrategy.type` | Query Coordinator statefulset strategy type | `RollingUpdate` | +| `queryCoord.updateStrategy.rollingUpdate` | Query Coordinator statefulset rolling update configuration parameters | `{}` | +| `queryCoord.extraVolumes` | Optionally specify extra list of additional volumes for the Query Coordinator pod(s) | `[]` | +| `queryCoord.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Query Coordinator container(s) | 
`[]` | +| `queryCoord.sidecars` | Add additional sidecar containers to the Query Coordinator pod(s) | `[]` | +| `queryCoord.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `queryCoord.initContainers` | Add additional init containers to the Query Coordinator pod(s) | `[]` | +| `queryCoord.serviceAccount.create` | Enable creation of ServiceAccount for Query Coordinator pods | `false` | +| `queryCoord.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `queryCoord.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `queryCoord.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `queryCoord.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `queryCoord.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `queryCoord.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Query Coordinator Autoscaling configuration + +| Name | Description | Value | +| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `queryCoord.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `queryCoord.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `queryCoord.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `queryCoord.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `queryCoord.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `queryCoord.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `queryCoord.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `queryCoord.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `queryCoord.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `queryCoord.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `queryCoord.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `queryCoord.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Query Coordinator Traffic Exposure Parameters + +| Name | Description | Value | +| -------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `queryCoord.service.type` | Query Coordinator service type | `ClusterIP` | +| `queryCoord.service.ports.grpc` | Query Coordinator GRPC service port | `19530` | +| `queryCoord.service.ports.metrics` | Query Coordinator Metrics service port | `9091` | +| `queryCoord.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `queryCoord.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `queryCoord.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `queryCoord.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `queryCoord.service.clusterIP` | Query Coordinator service Cluster IP | `""` | +| `queryCoord.service.loadBalancerIP` | Query Coordinator service 
Load Balancer IP | `""` | +| `queryCoord.service.loadBalancerSourceRanges` | Query Coordinator service Load Balancer sources | `[]` | +| `queryCoord.service.externalTrafficPolicy` | Query Coordinator service external traffic policy | `Cluster` | +| `queryCoord.service.annotations` | Additional custom annotations for Query Coordinator service | `{}` | +| `queryCoord.service.extraPorts` | Extra ports to expose in the Query Coordinator service | `[]` | +| `queryCoord.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `queryCoord.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `queryCoord.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `queryCoord.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `queryCoord.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `queryCoord.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Query Coordinator Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `queryCoord.metrics.enabled` | Enable metrics | `false` | +| `queryCoord.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `queryCoord.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `queryCoord.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `queryCoord.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `queryCoord.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `queryCoord.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `queryCoord.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `queryCoord.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `queryCoord.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `queryCoord.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `queryCoord.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `queryCoord.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Index Coordinator Deployment Parameters + +| Name | Description | Value | +| -------------------------------------------------------------- | ---------------------------------------------------------------------------------------------------------- | ---------------- | +| `indexCoord.enabled` | Enable Index Coordinator deployment | `true` | +| `indexCoord.extraEnvVars` | Array with extra environment variables to add to data coordinator nodes | `[]` | +| `indexCoord.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data coordinator nodes | `""` | +| `indexCoord.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data coordinator nodes | `""` | +| `indexCoord.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `indexCoord.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `indexCoord.extraConfig` | Override configuration | `{}` | +| `indexCoord.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| 
`indexCoord.command` | Override default container command (useful when using custom images) | `[]` | +| `indexCoord.args` | Override default container args (useful when using custom images) | `[]` | +| `indexCoord.replicaCount` | Number of Index Coordinator replicas to deploy | `1` | +| `indexCoord.containerPorts.grpc` | GRPC port for Index Coordinator | `19530` | +| `indexCoord.containerPorts.metrics` | Metrics port for Index Coordinator | `9091` | +| `indexCoord.livenessProbe.enabled` | Enable livenessProbe on Index Coordinator nodes | `true` | +| `indexCoord.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `indexCoord.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `indexCoord.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `indexCoord.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `indexCoord.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `indexCoord.readinessProbe.enabled` | Enable readinessProbe on Index Coordinator nodes | `true` | +| `indexCoord.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `indexCoord.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `indexCoord.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `indexCoord.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `indexCoord.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `indexCoord.startupProbe.enabled` | Enable startupProbe on Index Coordinator containers | `false` | +| `indexCoord.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `indexCoord.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `indexCoord.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| 
`indexCoord.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `indexCoord.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `indexCoord.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `indexCoord.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `indexCoord.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `indexCoord.resources.limits` | The resources limits for the index coordinator containers | `{}` | +| `indexCoord.resources.requests` | The requested resources for the index coordinator containers | `{}` | +| `indexCoord.podSecurityContext.enabled` | Enabled Index Coordinator pods' Security Context | `true` | +| `indexCoord.podSecurityContext.fsGroup` | Set Index Coordinator pod's Security Context fsGroup | `1001` | +| `indexCoord.podSecurityContext.seccompProfile.type` | Set Index Coordinator container's Security Context seccomp profile | `RuntimeDefault` | +| `indexCoord.containerSecurityContext.enabled` | Enabled Index Coordinator containers' Security Context | `true` | +| `indexCoord.containerSecurityContext.runAsUser` | Set Index Coordinator containers' Security Context runAsUser | `1001` | +| `indexCoord.containerSecurityContext.runAsNonRoot` | Set Index Coordinator containers' Security Context runAsNonRoot | `true` | +| `indexCoord.containerSecurityContext.readOnlyRootFilesystem` | Set Index Coordinator containers' Security Context readOnlyRootFilesystem | `true` | +| `indexCoord.containerSecurityContext.allowPrivilegeEscalation` | Set Index Coordinator container's privilege escalation | `false` | +| `indexCoord.containerSecurityContext.capabilities.drop` | Set Index Coordinator container's Security Context capabilities to be dropped | `["ALL"]` | +| `indexCoord.lifecycleHooks` | for the index coordinator container(s) to automate configuration before or after startup | `{}` | +| `indexCoord.runtimeClassName` | Name 
of the runtime class to be used by pod(s) | `""` | +| `indexCoord.hostAliases` | index coordinator pods host aliases | `[]` | +| `indexCoord.podLabels` | Extra labels for index coordinator pods | `{}` | +| `indexCoord.podAnnotations` | Annotations for index coordinator pods | `{}` | +| `indexCoord.podAffinityPreset` | Pod affinity preset. Ignored if `indexCoord.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `indexCoord.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `indexCoord.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `indexCoord.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `indexCoord.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `indexCoord.nodeAffinityPreset.key` | Node label key to match. Ignored if `indexCoord.affinity` is set | `""` | +| `indexCoord.nodeAffinityPreset.values` | Node label values to match. Ignored if `indexCoord.affinity` is set | `[]` | +| `indexCoord.affinity` | Affinity for Index Coordinator pods assignment | `{}` | +| `indexCoord.nodeSelector` | Node labels for Index Coordinator pods assignment | `{}` | +| `indexCoord.tolerations` | Tolerations for Index Coordinator pods assignment | `[]` | +| `indexCoord.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `indexCoord.priorityClassName` | Index Coordinator pods' priorityClassName | `""` | +| `indexCoord.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `indexCoord.updateStrategy.type` | Index Coordinator deployment strategy type | `RollingUpdate` | +| `indexCoord.updateStrategy.rollingUpdate` | Index Coordinator deployment rolling update configuration parameters | `{}` | +| `indexCoord.extraVolumes` | Optionally specify extra list of additional volumes for the Index Coordinator pod(s) | `[]` | +| `indexCoord.extraVolumeMounts` | Optionally specify extra list of additional 
volumeMounts for the Index Coordinator container(s) | `[]` | +| `indexCoord.sidecars` | Add additional sidecar containers to the Index Coordinator pod(s) | `[]` | +| `indexCoord.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `indexCoord.initContainers` | Add additional init containers to the Index Coordinator pod(s) | `[]` | +| `indexCoord.serviceAccount.create` | Enable creation of ServiceAccount for Index Coordinator pods | `false` | +| `indexCoord.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `indexCoord.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `indexCoord.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `indexCoord.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `indexCoord.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `indexCoord.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Index Coordinator Autoscaling configuration + +| Name | Description | Value | +| ---------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `indexCoord.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `indexCoord.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `indexCoord.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `indexCoord.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `indexCoord.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `indexCoord.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `indexCoord.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `indexCoord.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `indexCoord.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `indexCoord.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `indexCoord.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `indexCoord.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Index Coordinator Traffic Exposure Parameters + +| Name | Description | Value | +| -------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `indexCoord.service.type` | Index Coordinator service type | `ClusterIP` | +| `indexCoord.service.ports.grpc` | Index Coordinator GRPC service port | `19530` | +| `indexCoord.service.ports.metrics` | Index Coordinator Metrics service port | `9091` | +| `indexCoord.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `indexCoord.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `indexCoord.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `indexCoord.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `indexCoord.service.clusterIP` | Index Coordinator service Cluster IP | `""` | +| `indexCoord.service.loadBalancerIP` | Index Coordinator service 
Load Balancer IP | `""` | +| `indexCoord.service.loadBalancerSourceRanges` | Index Coordinator service Load Balancer sources | `[]` | +| `indexCoord.service.externalTrafficPolicy` | Index Coordinator service external traffic policy | `Cluster` | +| `indexCoord.service.annotations` | Additional custom annotations for Index Coordinator service | `{}` | +| `indexCoord.service.extraPorts` | Extra ports to expose in the Index Coordinator service | `[]` | +| `indexCoord.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `indexCoord.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `indexCoord.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `indexCoord.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `indexCoord.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `indexCoord.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Index Coordinator Metrics Parameters + +| Name | Description | Value | +| ----------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `indexCoord.metrics.enabled` | Enable metrics | `false` | +| `indexCoord.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `indexCoord.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `indexCoord.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `indexCoord.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `indexCoord.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `indexCoord.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `indexCoord.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `indexCoord.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `indexCoord.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `indexCoord.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `indexCoord.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `indexCoord.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Data Node Deployment Parameters + +| Name | Description | Value | +| ------------------------------------------------------------ | --------------------------------------------------------------------------------------------------- | ---------------- | +| `dataNode.enabled` | Enable Data Node deployment | `true` | +| `dataNode.extraEnvVars` | Array with extra environment variables to add to data node nodes | `[]` | +| `dataNode.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for data node nodes | `""` | +| `dataNode.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for data node nodes | `""` | +| `dataNode.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `dataNode.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `dataNode.extraConfig` | Override configuration | `{}` | +| `dataNode.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `dataNode.command` | Override default container command (useful when using 
custom images) | `[]` | +| `dataNode.args` | Override default container args (useful when using custom images) | `[]` | +| `dataNode.replicaCount` | Number of Data Node replicas to deploy | `1` | +| `dataNode.containerPorts.grpc` | GRPC port for Data Node | `19530` | +| `dataNode.containerPorts.metrics` | Metrics port for Data Node | `9091` | +| `dataNode.livenessProbe.enabled` | Enable livenessProbe on Data Node nodes | `true` | +| `dataNode.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `dataNode.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `dataNode.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `dataNode.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `dataNode.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `dataNode.readinessProbe.enabled` | Enable readinessProbe on Data Node nodes | `true` | +| `dataNode.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `dataNode.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `dataNode.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `dataNode.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `dataNode.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `dataNode.startupProbe.enabled` | Enable startupProbe on Data Node containers | `false` | +| `dataNode.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `dataNode.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `dataNode.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `dataNode.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `dataNode.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| 
`dataNode.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `dataNode.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `dataNode.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `dataNode.resources.limits` | The resources limits for the data node containers | `{}` | +| `dataNode.resources.requests` | The requested resources for the data node containers | `{}` | +| `dataNode.podSecurityContext.enabled` | Enabled Data Node pods' Security Context | `true` | +| `dataNode.podSecurityContext.fsGroup` | Set Data Node pod's Security Context fsGroup | `1001` | +| `dataNode.podSecurityContext.seccompProfile.type` | Set Data Node container's Security Context seccomp profile | `RuntimeDefault` | +| `dataNode.containerSecurityContext.enabled` | Enabled Data Node containers' Security Context | `true` | +| `dataNode.containerSecurityContext.runAsUser` | Set Data Node containers' Security Context runAsUser | `1001` | +| `dataNode.containerSecurityContext.runAsNonRoot` | Set Data Node containers' Security Context runAsNonRoot | `true` | +| `dataNode.containerSecurityContext.readOnlyRootFilesystem` | Set Data Node containers' Security Context readOnlyRootFilesystem | `true` | +| `dataNode.containerSecurityContext.allowPrivilegeEscalation` | Set Data Node container's privilege escalation | `false` | +| `dataNode.containerSecurityContext.capabilities.drop` | Set Data Node container's Security Context capabilities to be dropped | `["ALL"]` | +| `dataNode.lifecycleHooks` | for the data node container(s) to automate configuration before or after startup | `{}` | +| `dataNode.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `dataNode.hostAliases` | data node pods host aliases | `[]` | +| `dataNode.podLabels` | Extra labels for data node pods | `{}` | +| `dataNode.podAnnotations` | Annotations for data node pods | `{}` | +| `dataNode.podAffinityPreset` | Pod affinity 
preset. Ignored if `dataNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dataNode.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `dataNode.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `dataNode.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `dataNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `dataNode.nodeAffinityPreset.key` | Node label key to match. Ignored if `dataNode.affinity` is set | `""` | +| `dataNode.nodeAffinityPreset.values` | Node label values to match. Ignored if `dataNode.affinity` is set | `[]` | +| `dataNode.affinity` | Affinity for Data Node pods assignment | `{}` | +| `dataNode.nodeSelector` | Node labels for Data Node pods assignment | `{}` | +| `dataNode.tolerations` | Tolerations for Data Node pods assignment | `[]` | +| `dataNode.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `dataNode.priorityClassName` | Data Node pods' priorityClassName | `""` | +| `dataNode.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `dataNode.updateStrategy.type` | Data Node deployment strategy type | `RollingUpdate` | +| `dataNode.updateStrategy.rollingUpdate` | Data Node deployment rolling update configuration parameters | `{}` | +| `dataNode.extraVolumes` | Optionally specify extra list of additional volumes for the Data Node pod(s) | `[]` | +| `dataNode.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Data Node container(s) | `[]` | +| `dataNode.sidecars` | Add additional sidecar containers to the Data Node pod(s) | `[]` | +| `dataNode.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `dataNode.initContainers` | Add additional init containers to the Data Node pod(s) | `[]` | +| `dataNode.serviceAccount.create` | Enable creation of ServiceAccount for Data Node pods | `false` | +| 
`dataNode.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `dataNode.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `dataNode.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `dataNode.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `dataNode.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `dataNode.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Data Node Autoscaling configuration + +| Name | Description | Value | +| -------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `dataNode.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `dataNode.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `dataNode.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `dataNode.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `dataNode.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `dataNode.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `dataNode.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `dataNode.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `dataNode.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `dataNode.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `dataNode.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `dataNode.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Data Node Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------ | ---------------------------------------------------------------- | ----------- | +| `dataNode.service.type` | Data Node service type | `ClusterIP` | +| `dataNode.service.ports.grpc` | Data Node GRPC service port | `19530` | +| `dataNode.service.ports.metrics` | Data Node Metrics service port | `9091` | +| `dataNode.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `dataNode.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `dataNode.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `dataNode.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `dataNode.service.clusterIP` | Data Node service Cluster IP | `""` | +| `dataNode.service.loadBalancerIP` | Data Node service Load Balancer IP | `""` | +| `dataNode.service.loadBalancerSourceRanges` | Data Node 
service Load Balancer sources | `[]` | +| `dataNode.service.externalTrafficPolicy` | Data Node service external traffic policy | `Cluster` | +| `dataNode.service.annotations` | Additional custom annotations for Data Node service | `{}` | +| `dataNode.service.extraPorts` | Extra ports to expose in the Data Node service | `[]` | +| `dataNode.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `dataNode.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `dataNode.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `dataNode.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `dataNode.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `dataNode.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Data Node Metrics Parameters + +| Name | Description | Value | +| --------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `dataNode.metrics.enabled` | Enable metrics | `false` | +| `dataNode.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `dataNode.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `dataNode.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `dataNode.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `dataNode.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `dataNode.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `dataNode.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `dataNode.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `dataNode.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `dataNode.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `dataNode.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `dataNode.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Query Node Deployment Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | ---------------- | +| `queryNode.enabled` | Enable Query Node deployment | `true` | +| `queryNode.extraEnvVars` | Array with extra environment variables to add to query node nodes | `[]` | +| `queryNode.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for query node nodes | `""` | +| `queryNode.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for query node nodes | `""` | +| `queryNode.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `queryNode.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `queryNode.extraConfig` | Override configuration | `{}` | +| `queryNode.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `queryNode.command` | Override default container command (useful when using 
custom images) | `[]` | +| `queryNode.args` | Override default container args (useful when using custom images) | `[]` | +| `queryNode.replicaCount` | Number of Query Node replicas to deploy | `1` | +| `queryNode.containerPorts.grpc` | GRPC port for Query Node | `19530` | +| `queryNode.containerPorts.metrics` | Metrics port for Query Node | `9091` | +| `queryNode.livenessProbe.enabled` | Enable livenessProbe on Query Node nodes | `true` | +| `queryNode.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `queryNode.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `queryNode.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `queryNode.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `queryNode.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `queryNode.readinessProbe.enabled` | Enable readinessProbe on Query Node nodes | `true` | +| `queryNode.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `queryNode.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `queryNode.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `queryNode.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `queryNode.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `queryNode.startupProbe.enabled` | Enable startupProbe on Query Node containers | `false` | +| `queryNode.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `queryNode.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `queryNode.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `queryNode.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `queryNode.startupProbe.successThreshold` | Success threshold for startupProbe | 
`1` | +| `queryNode.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `queryNode.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `queryNode.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `queryNode.resources.limits` | The resources limits for the query node containers | `{}` | +| `queryNode.resources.requests` | The requested resources for the query node containers | `{}` | +| `queryNode.podSecurityContext.enabled` | Enabled Query Node pods' Security Context | `true` | +| `queryNode.podSecurityContext.fsGroup` | Set Query Node pod's Security Context fsGroup | `1001` | +| `queryNode.podSecurityContext.seccompProfile.type` | Set Query Node container's Security Context seccomp profile | `RuntimeDefault` | +| `queryNode.containerSecurityContext.enabled` | Enabled Query Node containers' Security Context | `true` | +| `queryNode.containerSecurityContext.runAsUser` | Set Query Node containers' Security Context runAsUser | `1001` | +| `queryNode.containerSecurityContext.runAsNonRoot` | Set Query Node containers' Security Context runAsNonRoot | `true` | +| `queryNode.containerSecurityContext.readOnlyRootFilesystem` | Set Query Node containers' Security Context readOnlyRootFilesystem | `true` | +| `queryNode.containerSecurityContext.allowPrivilegeEscalation` | Set Query Node container's privilege escalation | `false` | +| `queryNode.containerSecurityContext.capabilities.drop` | Set Query Node container's Security Context capabilities to be dropped | `["ALL"]` | +| `queryNode.lifecycleHooks` | for the query node container(s) to automate configuration before or after startup | `{}` | +| `queryNode.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `queryNode.hostAliases` | query node pods host aliases | `[]` | +| `queryNode.podLabels` | Extra labels for query node pods | `{}` | +| `queryNode.podAnnotations` | Annotations for query node pods | `{}` | +| 
`queryNode.podAffinityPreset` | Pod affinity preset. Ignored if `queryNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `queryNode.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `queryNode.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `queryNode.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `queryNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `queryNode.nodeAffinityPreset.key` | Node label key to match. Ignored if `queryNode.affinity` is set | `""` | +| `queryNode.nodeAffinityPreset.values` | Node label values to match. Ignored if `queryNode.affinity` is set | `[]` | +| `queryNode.affinity` | Affinity for Query Node pods assignment | `{}` | +| `queryNode.nodeSelector` | Node labels for Query Node pods assignment | `{}` | +| `queryNode.tolerations` | Tolerations for Query Node pods assignment | `[]` | +| `queryNode.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `queryNode.priorityClassName` | Query Node pods' priorityClassName | `""` | +| `queryNode.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `queryNode.updateStrategy.type` | Query Node statefulset strategy type | `RollingUpdate` | +| `queryNode.updateStrategy.rollingUpdate` | Query Node statefulset rolling update configuration parameters | `{}` | +| `queryNode.extraVolumes` | Optionally specify extra list of additional volumes for the Query Node pod(s) | `[]` | +| `queryNode.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Query Node container(s) | `[]` | +| `queryNode.sidecars` | Add additional sidecar containers to the Query Node pod(s) | `[]` | +| `queryNode.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `queryNode.initContainers` | Add additional init containers to the Query Node pod(s) | `[]` | +| `queryNode.serviceAccount.create` | Enable creation 
of ServiceAccount for Query Node pods | `false` | +| `queryNode.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `queryNode.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `queryNode.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `queryNode.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `queryNode.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `queryNode.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Query Node Autoscaling configuration + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `queryNode.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `queryNode.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `queryNode.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `queryNode.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `queryNode.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `queryNode.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `queryNode.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `queryNode.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `queryNode.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `queryNode.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `queryNode.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `queryNode.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Query Node Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `queryNode.service.type` | Query Node service type | `ClusterIP` | +| `queryNode.service.ports.grpc` | Query Node GRPC service port | `19530` | +| `queryNode.service.ports.metrics` | Query Node Metrics service port | `9091` | +| `queryNode.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `queryNode.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `queryNode.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `queryNode.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `queryNode.service.clusterIP` | Query Node service Cluster IP | `""` | +| `queryNode.service.loadBalancerIP` | Query Node service Load Balancer IP | `""` | +| 
`queryNode.service.loadBalancerSourceRanges` | Query Node service Load Balancer sources | `[]` | +| `queryNode.service.externalTrafficPolicy` | Query Node service external traffic policy | `Cluster` | +| `queryNode.service.annotations` | Additional custom annotations for Query Node service | `{}` | +| `queryNode.service.extraPorts` | Extra ports to expose in the Query Node service | `[]` | +| `queryNode.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `queryNode.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `queryNode.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `queryNode.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `queryNode.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `queryNode.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Query Node Metrics Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `queryNode.metrics.enabled` | Enable metrics | `false` | +| `queryNode.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `queryNode.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `queryNode.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `queryNode.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `queryNode.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `queryNode.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `queryNode.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `queryNode.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `queryNode.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `queryNode.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `queryNode.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `queryNode.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Index Node Deployment Parameters + +| Name | Description | Value | +| ------------------------------------------------------------- | --------------------------------------------------------------------------------------------------- | ---------------- | +| `indexNode.enabled` | Enable Index Node deployment | `true` | +| `indexNode.extraEnvVars` | Array with extra environment variables to add to index node nodes | `[]` | +| `indexNode.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for index node nodes | `""` | +| `indexNode.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for index node nodes | `""` | +| `indexNode.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `indexNode.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `indexNode.extraConfig` | Override configuration | `{}` | +| `indexNode.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `indexNode.command` | Override default container command (useful when 
using custom images) | `[]` | +| `indexNode.args` | Override default container args (useful when using custom images) | `[]` | +| `indexNode.replicaCount` | Number of Index Node replicas to deploy | `1` | +| `indexNode.containerPorts.grpc` | GRPC port for Index Node | `19530` | +| `indexNode.containerPorts.metrics` | Metrics port for Index Node | `9091` | +| `indexNode.livenessProbe.enabled` | Enable livenessProbe on Index Node nodes | `true` | +| `indexNode.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `indexNode.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `indexNode.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `indexNode.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `indexNode.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `indexNode.readinessProbe.enabled` | Enable readinessProbe on Index Node nodes | `true` | +| `indexNode.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `indexNode.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `indexNode.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `indexNode.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `indexNode.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `indexNode.startupProbe.enabled` | Enable startupProbe on Index Node containers | `false` | +| `indexNode.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `indexNode.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `indexNode.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `indexNode.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `indexNode.startupProbe.successThreshold` | Success threshold for 
startupProbe | `1` | +| `indexNode.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `indexNode.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `indexNode.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `indexNode.resources.limits` | The resources limits for the index node containers | `{}` | +| `indexNode.resources.requests` | The requested resources for the index node containers | `{}` | +| `indexNode.podSecurityContext.enabled` | Enabled Index Node pods' Security Context | `true` | +| `indexNode.podSecurityContext.fsGroup` | Set Index Node pod's Security Context fsGroup | `1001` | +| `indexNode.podSecurityContext.seccompProfile.type` | Set Index Node container's Security Context seccomp profile | `RuntimeDefault` | +| `indexNode.containerSecurityContext.enabled` | Enabled Index Node containers' Security Context | `true` | +| `indexNode.containerSecurityContext.runAsUser` | Set Index Node containers' Security Context runAsUser | `1001` | +| `indexNode.containerSecurityContext.runAsNonRoot` | Set Index Node containers' Security Context runAsNonRoot | `true` | +| `indexNode.containerSecurityContext.readOnlyRootFilesystem` | Set Index Node containers' Security Context readOnlyRootFilesystem | `true` | +| `indexNode.containerSecurityContext.allowPrivilegeEscalation` | Set Index Node container's privilege escalation | `false` | +| `indexNode.containerSecurityContext.capabilities.drop` | Set Index Node container's Security Context capabilities to be dropped | `["ALL"]` | +| `indexNode.lifecycleHooks` | for the index node container(s) to automate configuration before or after startup | `{}` | +| `indexNode.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `indexNode.hostAliases` | index node pods host aliases | `[]` | +| `indexNode.podLabels` | Extra labels for index node pods | `{}` | +| `indexNode.podAnnotations` | Annotations for index node pods | `{}` | 
+| `indexNode.podAffinityPreset` | Pod affinity preset. Ignored if `indexNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `indexNode.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `indexNode.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `indexNode.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `indexNode.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `indexNode.nodeAffinityPreset.key` | Node label key to match. Ignored if `indexNode.affinity` is set | `""` | +| `indexNode.nodeAffinityPreset.values` | Node label values to match. Ignored if `indexNode.affinity` is set | `[]` | +| `indexNode.affinity` | Affinity for Index Node pods assignment | `{}` | +| `indexNode.nodeSelector` | Node labels for Index Node pods assignment | `{}` | +| `indexNode.tolerations` | Tolerations for Index Node pods assignment | `[]` | +| `indexNode.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `indexNode.priorityClassName` | Index Node pods' priorityClassName | `""` | +| `indexNode.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `indexNode.updateStrategy.type` | Index Node statefulset strategy type | `RollingUpdate` | +| `indexNode.updateStrategy.rollingUpdate` | Index Node statefulset rolling update configuration parameters | `{}` | +| `indexNode.extraVolumes` | Optionally specify extra list of additional volumes for the Index Node pod(s) | `[]` | +| `indexNode.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Index Node container(s) | `[]` | +| `indexNode.sidecars` | Add additional sidecar containers to the Index Node pod(s) | `[]` | +| `indexNode.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `indexNode.initContainers` | Add additional init containers to the Index Node pod(s) | `[]` | +| `indexNode.serviceAccount.create` | Enable 
creation of ServiceAccount for Index Node pods | `false` | +| `indexNode.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `indexNode.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `indexNode.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `indexNode.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `indexNode.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `indexNode.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Index Node Autoscaling configuration + +| Name | Description | Value | +| --------------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `indexNode.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `indexNode.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `indexNode.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `indexNode.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `indexNode.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `indexNode.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `indexNode.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `indexNode.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `indexNode.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `indexNode.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `indexNode.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `indexNode.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Index Node Traffic Exposure Parameters + +| Name | Description | Value | +| ------------------------------------------------- | ---------------------------------------------------------------- | ----------- | +| `indexNode.service.type` | Index Node service type | `ClusterIP` | +| `indexNode.service.ports.grpc` | Index Node GRPC service port | `19530` | +| `indexNode.service.ports.metrics` | Index Node Metrics service port | `9091` | +| `indexNode.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `indexNode.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `indexNode.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `indexNode.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `indexNode.service.clusterIP` | Index Node service Cluster IP | `""` | +| `indexNode.service.loadBalancerIP` | Index Node service Load Balancer IP | `""` | +| 
`indexNode.service.loadBalancerSourceRanges` | Index Node service Load Balancer sources | `[]` | +| `indexNode.service.externalTrafficPolicy` | Index Node service external traffic policy | `Cluster` | +| `indexNode.service.annotations` | Additional custom annotations for Index Node service | `{}` | +| `indexNode.service.extraPorts` | Extra ports to expose in the Index Node service | `[]` | +| `indexNode.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `indexNode.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `indexNode.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `indexNode.networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `indexNode.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `indexNode.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Index Node Metrics Parameters + +| Name | Description | Value | +| ---------------------------------------------------- | ------------------------------------------------------------------------------------- | ------- | +| `indexNode.metrics.enabled` | Enable metrics | `false` | +| `indexNode.metrics.annotations` | Annotations for the server service in order to scrape metrics | `{}` | +| `indexNode.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `indexNode.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `indexNode.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `indexNode.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `indexNode.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `indexNode.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `indexNode.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `indexNode.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `indexNode.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `indexNode.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `indexNode.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Proxy Deployment Parameters + +| Name | Description | Value | +| --------------------------------------------------------- | ----------------------------------------------------------------------------------------------- | ---------------- | +| `proxy.enabled` | Enable Proxy deployment | `true` | +| `proxy.extraEnvVars` | Array with extra environment variables to add to proxy nodes | `[]` | +| `proxy.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for proxy nodes | `""` | +| `proxy.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for proxy nodes | `""` | +| `proxy.defaultConfig` | Default override configuration from the common set in milvus.defaultConfig | `""` | +| `proxy.existingConfigMap` | name of a ConfigMap with existing configuration for the default configuration | `""` | +| `proxy.extraConfig` | Override configuration | `{}` | +| `proxy.extraConfigExistingConfigMap` | name of a ConfigMap with existing configuration for the Dashboard | `""` | +| `proxy.command` | Override default container command (useful when using custom images) | `[]` | +| `proxy.args` | Override default 
container args (useful when using custom images) | `[]` | +| `proxy.replicaCount` | Number of Proxy replicas to deploy | `1` | +| `proxy.containerPorts.grpc` | GRPC port for Proxy | `19530` | +| `proxy.containerPorts.grpcInternal` | GRPC internal port for Proxy | `19529` | +| `proxy.containerPorts.metrics` | Metrics port for Proxy | `9091` | +| `proxy.livenessProbe.enabled` | Enable livenessProbe on Proxy nodes | `true` | +| `proxy.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `proxy.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `proxy.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `proxy.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `proxy.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `proxy.readinessProbe.enabled` | Enable readinessProbe on Proxy nodes | `true` | +| `proxy.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `proxy.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `proxy.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `proxy.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `proxy.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `proxy.startupProbe.enabled` | Enable startupProbe on Proxy containers | `false` | +| `proxy.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `proxy.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `proxy.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `proxy.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `proxy.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `proxy.customLivenessProbe` | Custom livenessProbe that overrides the default one | 
`{}` | +| `proxy.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `proxy.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `proxy.resources.limits` | The resources limits for the proxy containers | `{}` | +| `proxy.resources.requests` | The requested resources for the proxy containers | `{}` | +| `proxy.podSecurityContext.enabled` | Enabled Proxy pods' Security Context | `true` | +| `proxy.podSecurityContext.fsGroup` | Set Proxy pod's Security Context fsGroup | `1001` | +| `proxy.podSecurityContext.seccompProfile.type` | Set Proxy container's Security Context seccomp profile | `RuntimeDefault` | +| `proxy.containerSecurityContext.enabled` | Enabled Proxy containers' Security Context | `true` | +| `proxy.containerSecurityContext.runAsUser` | Set Proxy containers' Security Context runAsUser | `1001` | +| `proxy.containerSecurityContext.runAsNonRoot` | Set Proxy containers' Security Context runAsNonRoot | `true` | +| `proxy.containerSecurityContext.readOnlyRootFilesystem` | Set Proxy containers' Security Context readOnlyRootFilesystem | `true` | +| `proxy.containerSecurityContext.allowPrivilegeEscalation` | Set Proxy container's privilege escalation | `false` | +| `proxy.containerSecurityContext.capabilities.drop` | Set Proxy container's Security Context capabilities to be dropped | `["ALL"]` | +| `proxy.lifecycleHooks` | for the proxy container(s) to automate configuration before or after startup | `{}` | +| `proxy.runtimeClassName` | Name of the runtime class to be used by pod(s) | `""` | +| `proxy.hostAliases` | proxy pods host aliases | `[]` | +| `proxy.podLabels` | Extra labels for proxy pods | `{}` | +| `proxy.podAnnotations` | Annotations for proxy pods | `{}` | +| `proxy.podAffinityPreset` | Pod affinity preset. Ignored if `proxy.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `proxy.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `proxy.affinity` is set. 
Allowed values: `soft` or `hard` | `soft` | +| `proxy.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `proxy.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `proxy.nodeAffinityPreset.key` | Node label key to match. Ignored if `proxy.affinity` is set | `""` | +| `proxy.nodeAffinityPreset.values` | Node label values to match. Ignored if `proxy.affinity` is set | `[]` | +| `proxy.affinity` | Affinity for Proxy pods assignment | `{}` | +| `proxy.nodeSelector` | Node labels for Proxy pods assignment | `{}` | +| `proxy.tolerations` | Tolerations for Proxy pods assignment | `[]` | +| `proxy.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `proxy.priorityClassName` | Proxy pods' priorityClassName | `""` | +| `proxy.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `proxy.updateStrategy.type` | Proxy statefulset strategy type | `RollingUpdate` | +| `proxy.updateStrategy.rollingUpdate` | Proxy statefulset rolling update configuration parameters | `{}` | +| `proxy.extraVolumes` | Optionally specify extra list of additional volumes for the Proxy pod(s) | `[]` | +| `proxy.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Proxy container(s) | `[]` | +| `proxy.sidecars` | Add additional sidecar containers to the Proxy pod(s) | `[]` | +| `proxy.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `proxy.initContainers` | Add additional init containers to the Proxy pod(s) | `[]` | +| `proxy.serviceAccount.create` | Enable creation of ServiceAccount for Proxy pods | `false` | +| `proxy.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `proxy.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `proxy.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| 
`proxy.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `proxy.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `proxy.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Proxy Autoscaling configuration + +| Name | Description | Value | +| ----------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `proxy.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `proxy.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `proxy.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory | `[]` | +| `proxy.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `proxy.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `proxy.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `proxy.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `proxy.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `proxy.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `proxy.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `proxy.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `proxy.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Proxy Traffic Exposure Parameters + +| Name | Description | Value | +| --------------------------------------------- | ---------------------------------------------------------------- 
| -------------- | +| `proxy.service.type` | Proxy service type | `LoadBalancer` | +| `proxy.service.ports.grpc` | Proxy GRPC service port | `19530` | +| `proxy.service.ports.metrics` | Proxy Metrics service port | `9091` | +| `proxy.service.nodePorts.grpc` | Node port for GRPC | `""` | +| `proxy.service.nodePorts.metrics` | Node port for Metrics | `""` | +| `proxy.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `proxy.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `proxy.service.clusterIP` | Proxy service Cluster IP | `""` | +| `proxy.service.loadBalancerIP` | Proxy service Load Balancer IP | `""` | +| `proxy.service.loadBalancerSourceRanges` | Proxy service Load Balancer sources | `[]` | +| `proxy.service.externalTrafficPolicy` | Proxy service external traffic policy | `Cluster` | +| `proxy.service.annotations` | Additional custom annotations for Proxy service | `{}` | +| `proxy.service.extraPorts` | Extra ports to expose in the Proxy service | `[]` | +| `proxy.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `proxy.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `proxy.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `proxy.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `proxy.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `proxy.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Proxy Metrics Parameters + +| Name | Description | Value | +| ------------------------------------------------ | ------------------------------------------------------------------------------------- | ------- | +| `proxy.metrics.enabled` | Enable metrics | `false` | +| `proxy.metrics.annotations` | Annotations for the server service in 
order to scrape metrics | `{}` | +| `proxy.metrics.serviceMonitor.enabled` | Create ServiceMonitor Resource for scraping metrics using Prometheus Operator | `false` | +| `proxy.metrics.serviceMonitor.annotations` | Annotations for the ServiceMonitor Resource | `""` | +| `proxy.metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) | `""` | +| `proxy.metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | +| `proxy.metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `proxy.metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `proxy.metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `proxy.metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `proxy.metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `proxy.metrics.serviceMonitor.honorLabels` | Specify honorLabels parameter to add the scrape endpoint | `false` | +| `proxy.metrics.serviceMonitor.jobLabel` | The name of the label on the target service to use as the job name in prometheus. | `""` | + +### Attu Deployment Parameters + +| Name | Description | Value | +| -------------------------------------------------------- | ---------------------------------------------------------------------------------------------------- | -------------------- | +| `attu.enabled` | Enable Attu deployment | `true` | +| `attu.image.registry` | Attu image registry | `docker.io` | +| `attu.image.repository` | Attu image repository | `bitnami/attu` | +| `attu.image.tag` | Attu image tag (immutable tags are recommended) | `2.2.6-debian-11-r1` | +| `attu.image.digest` | Attu image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `attu.image.pullPolicy` | Attu image pull policy | `IfNotPresent` | +| `attu.image.pullSecrets` | Attu image pull secrets | `[]` | +| `attu.image.debug` | Enable debug mode | `false` | +| `attu.extraEnvVars` | Array with extra environment variables to add to attu nodes | `[]` | +| `attu.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for attu nodes | `""` | +| `attu.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for attu nodes | `""` | +| `attu.command` | Override default container command (useful when using custom images) | `[]` | +| `attu.args` | Override default container args (useful when using custom images) | `[]` | +| `attu.replicaCount` | Number of Attu replicas to deploy | `1` | +| `attu.containerPorts.http` | HTTP port for Attu | `3000` | +| `attu.livenessProbe.enabled` | Enable livenessProbe on Attu nodes | `true` | +| `attu.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `5` | +| `attu.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `attu.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `5` | +| `attu.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `5` | +| `attu.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `attu.readinessProbe.enabled` | Enable readinessProbe on Attu nodes | `true` | +| `attu.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `5` | +| `attu.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `attu.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `5` | +| `attu.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `5` | +| `attu.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `attu.startupProbe.enabled` | Enable startupProbe on Attu containers | `false` | 
+| `attu.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `5` | +| `attu.startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `attu.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `5` | +| `attu.startupProbe.failureThreshold` | Failure threshold for startupProbe | `5` | +| `attu.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `attu.customLivenessProbe` | Custom livenessProbe that overrides the default one | `{}` | +| `attu.customReadinessProbe` | Custom readinessProbe that overrides the default one | `{}` | +| `attu.customStartupProbe` | Custom startupProbe that overrides the default one | `{}` | +| `attu.resources.limits` | The resources limits for the attu containers | `{}` | +| `attu.resources.requests` | The requested resources for the attu containers | `{}` | +| `attu.podSecurityContext.enabled` | Enabled Attu pods' Security Context | `true` | +| `attu.podSecurityContext.fsGroup` | Set Attu pod's Security Context fsGroup | `1001` | +| `attu.podSecurityContext.seccompProfile.type` | Set Attu container's Security Context seccomp profile | `RuntimeDefault` | +| `attu.containerSecurityContext.enabled` | Enabled Attu containers' Security Context | `true` | +| `attu.containerSecurityContext.runAsUser` | Set Attu containers' Security Context runAsUser | `1001` | +| `attu.containerSecurityContext.runAsNonRoot` | Set Attu containers' Security Context runAsNonRoot | `true` | +| `attu.containerSecurityContext.readOnlyRootFilesystem` | Set Attu containers' Security Context runAsNonRoot | `true` | +| `attu.containerSecurityContext.allowPrivilegeEscalation` | Set Attu container's privilege escalation | `false` | +| `attu.containerSecurityContext.capabilities.drop` | Set Attu container's Security Context runAsNonRoot | `["ALL"]` | +| `attu.lifecycleHooks` | for the attu container(s) to automate configuration before or after startup | `{}` | +| `attu.runtimeClassName` | Name of 
the runtime class to be used by pod(s) | `""` | +| `attu.hostAliases` | attu pods host aliases | `[]` | +| `attu.podLabels` | Extra labels for attu pods | `{}` | +| `attu.podAnnotations` | Annotations for attu pods | `{}` | +| `attu.podAffinityPreset` | Pod affinity preset. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `attu.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `attu.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `attu.nodeAffinityPreset.key` | Node label key to match. Ignored if `attu.affinity` is set | `""` | +| `attu.nodeAffinityPreset.values` | Node label values to match. Ignored if `attu.affinity` is set | `[]` | +| `attu.affinity` | Affinity for Attu pods assignment | `{}` | +| `attu.nodeSelector` | Node labels for Attu pods assignment | `{}` | +| `attu.tolerations` | Tolerations for Attu pods assignment | `[]` | +| `attu.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains | `[]` | +| `attu.priorityClassName` | Attu pods' priorityClassName | `""` | +| `attu.schedulerName` | Kubernetes pod scheduler registry | `""` | +| `attu.updateStrategy.type` | Attu statefulset strategy type | `RollingUpdate` | +| `attu.updateStrategy.rollingUpdate` | Attu statefulset rolling update configuration parameters | `{}` | +| `attu.extraVolumes` | Optionally specify extra list of additional volumes for the Attu pod(s) | `[]` | +| `attu.extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the Attu container(s) | `[]` | +| `attu.sidecars` | Add additional sidecar containers to the Attu pod(s) | `[]` | +| `attu.enableDefaultInitContainers` | Deploy default init containers | `true` | +| `attu.initContainers` | Add additional init containers to the Attu pod(s) | `[]` | +| 
`attu.serviceAccount.create` | Enable creation of ServiceAccount for Attu pods | `false` | +| `attu.serviceAccount.name` | The name of the ServiceAccount to use | `""` | +| `attu.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `attu.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `attu.pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `attu.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `1` | +| `attu.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `""` | + +### Attu Autoscaling configuration + +| Name | Description | Value | +| ---------------------------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------- | +| `attu.autoscaling.vpa.enabled` | Enable VPA | `false` | +| `attu.autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `attu.autoscaling.vpa.controlledResources` | VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory | `[]` | +| `attu.autoscaling.vpa.maxAllowed` | VPA Max allowed resources for the pod | `{}` | +| `attu.autoscaling.vpa.minAllowed` | VPA Min allowed resources for the pod | `{}` | +| `attu.autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod | `Auto` | +| `attu.autoscaling.hpa.enabled` | Enable HPA for Milvus Data Plane | `false` | +| `attu.autoscaling.hpa.annotations` | Annotations for HPA resource | `{}` | +| `attu.autoscaling.hpa.minReplicas` | Minimum number of Milvus Data Plane replicas | `""` | +| `attu.autoscaling.hpa.maxReplicas` | Maximum number of Milvus Data Plane replicas | `""` | +| `attu.autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `attu.autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | + +### Attu Traffic Exposure Parameters + +| Name | Description | Value | +| -------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `attu.service.type` | Attu service type | `LoadBalancer` | +| `attu.service.ports.http` | Attu HTTP service port | `80` | +| `attu.service.nodePorts.http` | Node port for HTTP | `""` | +| `attu.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `attu.service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `attu.service.clusterIP` | Attu service Cluster IP | `""` | +| `attu.service.loadBalancerIP` | Attu service Load Balancer IP | `""` | +| `attu.service.loadBalancerSourceRanges` | Attu service Load Balancer sources | `[]` | +| `attu.service.externalTrafficPolicy` | Attu service external traffic policy | `Cluster` | +| `attu.service.annotations` | Additional custom 
annotations for Attu service | `{}` | +| `attu.service.extraPorts` | Extra ports to expose in the Attu service | `[]` | +| `attu.ingress.enabled` | Enable ingress record generation for Milvus | `false` | +| `attu.ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `attu.ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `attu.ingress.hostname` | Default host for the ingress record | `milvus.local` | +| `attu.ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | +| `attu.ingress.path` | Default path for the ingress record | `/` | +| `attu.ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `attu.ingress.tls` | Enable TLS configuration for the host defined at `attu.ingress.hostname` parameter | `false` | +| `attu.ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `attu.ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `attu.ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `attu.ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `attu.ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `attu.ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `attu.networkPolicy.enabled` | Enable creation of NetworkPolicy resources | `false` | +| `attu.networkPolicy.allowExternal` | The Policy model to apply | `true` | +| `attu.networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `attu.networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| 
`attu.networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `attu.networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | + +### Init Container Parameters + +| Name | Description | Value | +| ----------------------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------- | ----------------------- | +| `waitContainer.image.registry` | Init container wait-container image registry | `docker.io` | +| `waitContainer.image.repository` | Init container wait-container image name | `bitnami/bitnami-shell` | +| `waitContainer.image.tag` | Init container wait-container image tag | `11-debian-11-r127` | +| `waitContainer.image.digest` | Init container wait-container image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `waitContainer.image.pullPolicy` | Init container wait-container image pull policy | `IfNotPresent` | +| `waitContainer.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `waitContainer.containerSecurityContext.enabled` | Enabled Milvus containers' Security Context | `true` | +| `waitContainer.containerSecurityContext.runAsUser` | Set Milvus containers' Security Context runAsUser | `1001` | +| `waitContainer.containerSecurityContext.runAsNonRoot` | Set Milvus containers' Security Context runAsNonRoot | `true` | +| `waitContainer.containerSecurityContext.readOnlyRootFilesystem` | Set Milvus containers' Security Context runAsNonRoot | `true` | +| `waitContainer.containerSecurityContext.allowPrivilegeEscalation` | Set Milvus container's privilege escalation | `false` | +| `waitContainer.containerSecurityContext.capabilities.drop` | Set Milvus container's Security Context runAsNonRoot | `["ALL"]` | + +### External etcd parameters + +| Name | Description | Value | +| 
------------------------------ | ------------------------------------------- | ------- | +| `externalEtcd.servers` | List of hostnames of the external etcd | `[]` | +| `externalEtcd.port` | Port of the external etcd instance | `2379` | +| `externalEtcd.secureTransport` | Use TLS for client-to-server communications | `false` | + +### External S3 parameters + +| Name | Description | Value | +| ----------------------------------------- | ------------------------------------------------------------------ | --------------- | +| `externalS3.host` | External S3 host | `""` | +| `externalS3.port` | External S3 port number | `443` | +| `externalS3.accessKeyID` | External S3 access key ID | `""` | +| `externalS3.accessKeySecret` | External S3 access key secret | `""` | +| `externalS3.existingSecret` | Name of an existing secret resource containing the S3 credentials | `""` | +| `externalS3.existingSecretAccessKeyIDKey` | Name of an existing secret key containing the S3 access key ID | `root-user` | +| `externalS3.existingSecretKeySecretKey` | Name of an existing secret key containing the S3 access key secret | `root-password` | +| `externalS3.protocol` | External S3 protocol | `https` | +| `externalS3.bucket` | External S3 bucket | `milvus` | +| `externalS3.rootPath` | External S3 root path | `file` | +| `externalS3.iamEndpoint` | External S3 IAM endpoint | `""` | +| `externalS3.cloudProvider` | External S3 cloud provider | `""` | + +### External Kafka parameters + +| Name | Description | Value | +| ----------------------- | ---------------------- | --------------- | +| `externalKafka.servers` | External Kafka brokers | `["localhost"]` | +| `externalKafka.port` | External Kafka port | `9092` | + +### etcd sub-chart parameters + +| Name | Description | Value | +| ---------------------------------- | ------------------------------------------- | ------- | +| `etcd.enabled` | Deploy etcd sub-chart | `true` | +| `etcd.replicaCount` | Number of etcd replicas | `3` | +| 
`etcd.containerPorts.client` | Container port for etcd | `2379` | +| `etcd.auth.rbac.create` | Switch to enable RBAC authentication | `false` | +| `etcd.auth.client.secureTransport` | use TLS for client-to-server communications | `false` | + +### MinIO® chart parameters + +| Name | Description | Value | +| ---------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | --------------------------------------------------- | +| `minio` | For full list of MinIO® values configurations please refere [here](https://github.com/bitnami/charts/tree/main/bitnami/minio) | | +| `minio.enabled` | Enable/disable MinIO® chart installation | `true` | +| `minio.auth.rootUser` | MinIO® root username | `admin` | +| `minio.auth.rootPassword` | Password for MinIO® root user | `""` | +| `minio.auth.existingSecret` | Name of an existing secret containing the MinIO® credentials | `""` | +| `minio.defaultBuckets` | Comma, semi-colon or space separated list of MinIO® buckets to create | `milvus` | +| `minio.provisioning.enabled` | Enable/disable MinIO® provisioning job | `true` | +| `minio.provisioning.extraCommands` | Extra commands to run on MinIO® provisioning job | `["mc anonymous set download provisioning/milvus"]` | +| `minio.tls.enabled` | Enable/disable MinIO® TLS support | `false` | +| `minio.service.type` | MinIO® service type | `ClusterIP` | +| `minio.service.loadBalancerIP` | MinIO® service LoadBalancer IP | `""` | +| `minio.service.ports.api` | MinIO® service port | `80` | + +### kafka sub-chart paramaters + +| Name | Description | Value | +| ---------------------------------- | -------------------------------------------- | ---------- | +| `kafka.enabled` | Enable/disable Kafka chart installation | `true` | +| `kafka.replicaCount` | Number of Kafka brokers | `1` | +| `kafka.service.ports.client` | Kafka svc port for client connections | `9092` | +| 
`kafka.auth.clientProtocol` | Kafka authentication protocol for the client | `sasl` | +| `kafka.auth.sasl.mechanisms` | Kafka authentication mechanisms for SASL | `plain` | +| `kafka.auth.sasl.jaas.clientUsers` | Kafka client users | `["user"]` | + +See <https://github.com/bitnami-labs/readme-generator-for-helm> to create the table. + +The above parameters map to the env variables defined in [bitnami/milvus](https://github.com/bitnami/containers/tree/main/bitnami/milvus). For more information please refer to the [bitnami/milvus](https://github.com/bitnami/containers/tree/main/bitnami/milvus) image documentation. + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set attu.enabled=false \ + oci://registry-1.docker.io/bitnamicharts/milvus +``` + +The above command disables the Attu dashboard deployment. + +Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://registry-1.docker.io/bitnamicharts/milvus +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Milvus configuration + +The Milvus configuration file `milvus.yaml` is shared across the different components: `rootCoord`, `dataCoord`, `indexCoord`, `dataNode` and `indexNode`. This is set in the `milvus.defaultConfig` value. This configuration can be extended with extra settings using the `milvus.extraConfig` value. 
For specific component configuration edit the `extraConfig` section inside each of the previously mentioned components. Check the official [Milvus documentation](https://milvus.io/docs) for the list of possible configurations. + +### Additional environment variables + +In case you want to add extra environment variables (useful for advanced operations like custom init scripts), you can use the `extraEnvVars` property inside each of the subsections: `rootCoord`, `dataCoord`, `indexCoord`, `dataNode`, `indexNode`, `attu` and `queryNode`. + +```yaml +dataCoord: + extraEnvVars: + - name: LOG_LEVEL + value: error + +rootCoord: + extraEnvVars: + - name: LOG_LEVEL + value: error + +indexCoord: + extraEnvVars: + - name: LOG_LEVEL + value: error + +dataNode: + extraEnvVars: + - name: LOG_LEVEL + value: error + +indexNode: + extraEnvVars: + - name: LOG_LEVEL + value: error + +queryNode: + extraEnvVars: + - name: LOG_LEVEL + value: error +``` + +Alternatively, you can use a ConfigMap or a Secret with the environment variables. To do so, use the `extraEnvVarsCM` or the `extraEnvVarsSecret` values. + +### Sidecars + +If additional containers are needed in the same pod as Milvus (such as additional metrics or logging exporters), they can be defined using the `sidecars` parameter inside each of the subsections: `rootCoord`, `dataCoord`, `indexCoord`, `dataNode`, `indexNode`, `attu` and `queryNode`. If these sidecars export extra ports, extra port definitions can be added using the `service.extraPorts` parameter. [Learn more about configuring and using sidecar containers](https://docs.bitnami.com/kubernetes/infrastructure/milvus/configuration/configure-sidecar-init-containers/). + +### Pod affinity + +This chart allows you to set your custom affinity using the `affinity` parameter. Find more information about Pod affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity). 
+ +As an alternative, use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters inside each of the subsections: `rootCoord`, `dataCoord`, `indexCoord`, `dataNode`, `indexNode`, `attu` and `queryNode`. + +### External kafka support + +You may want to have Milvus connect to an external kafka rather than installing one inside your cluster. Typical reasons for this are to use a managed Kafka service, or to share a common Kafka cluster for all your applications. To achieve this, the chart allows you to specify the connection details of an external Kafka with the [`externalKafka` parameter](#parameters). You should also disable the Kafka installation with the `kafka.enabled` option. Here is an example: + +```yaml +kafka: + enabled: false +externalKafka: + servers: + - externalhost +``` + +### External etcd support + +You may want to have Milvus connect to an external etcd rather than installing one inside your cluster. Typical reasons for this are to use a managed database service, or to share a common database server for all your applications. To achieve this, the chart allows you to specify the connection details of an external etcd with the [`externalEtcd` parameter](#parameters). You should also disable the etcd installation with the `etcd.enabled` option. Here is an example: + +```yaml +etcd: + enabled: false +externalEtcd: + servers: + - externalhost +``` + +### External S3 support + +You may want to have Milvus connect to an external object storage service rather than installing MinIO(TM) inside your cluster. To achieve this, the chart allows you to specify credentials for an external object storage service with the [`externalS3` parameter](#parameters). You should also disable the MinIO(TM) installation with the `minio.enabled` option. 
Here is an example: + +```console +minio.enabled=false +externalS3.host=myexternalhost +externalS3.accessKeyID=accesskey +externalS3.accessKeySecret=secret +``` + +### Ingress + +This chart provides support for Ingress resources for the Attu component. If you have an ingress controller installed on your cluster, such as [nginx-ingress-controller](https://github.com/bitnami/charts/tree/main/bitnami/nginx-ingress-controller) or [contour](https://github.com/bitnami/charts/tree/main/bitnami/contour) you can utilize the ingress controller to serve the Attu dashboard. + +To enable Ingress integration, set `attu.ingress.enabled` to `true`. The `attu.ingress.hostname` property can be used to set the host name. The `attu.ingress.tls` parameter can be used to add the TLS configuration for this host. It is also possible to have more than one host, with a separate TLS configuration for each host. [Learn more about configuring and using Ingress](https://docs.bitnami.com/kubernetes/infrastructure/milvus/configuration/configure-ingress/). + +### TLS secrets + +The chart also facilitates the creation of TLS secrets for use with the Ingress controller, with different options for certificate management. [Learn more about TLS secrets](https://docs.bitnami.com/kubernetes/infrastructure/milvus/administration/enable-tls-ingress/). + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## License + +Copyright © 2023 VMware, Inc. + +Licensed under the Apache License, Version 2.0 (the "License"); +you may not use this file except in compliance with the License. +You may obtain a copy of the License at + +<http://www.apache.org/licenses/LICENSE-2.0> + +Unless required by applicable law or agreed to in writing, software +distributed under the License is distributed on an "AS IS" BASIS, +WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+See the License for the specific language governing permissions and
+limitations under the License.
diff --git a/bitnami/milvus/templates/NOTES.txt b/bitnami/milvus/templates/NOTES.txt
new file mode 100644
index 0000000000..4005f86295
--- /dev/null
+++ b/bitnami/milvus/templates/NOTES.txt
@@ -0,0 +1,115 @@
+CHART NAME: {{ .Chart.Name }}
+CHART VERSION: {{ .Chart.Version }}
+APP VERSION: {{ .Chart.AppVersion }}
+
+** Please be patient while the chart is being deployed **
+
+{{- if .Values.diagnosticMode.enabled }}
+The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with:
+
+  command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }}
+  args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 4 }}
+
+Get the list of pods by executing:
+
+  kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Access the pod you want to debug by executing
+
+  kubectl exec --namespace {{ include "common.names.namespace" . }} -ti <NAME OF THE POD> -- bash
+
+{{- else }}
+
+Installed components:
+
+  {{- if .Values.proxy.enabled }}
+  * proxy
+  {{- end }}
+  {{- if .Values.dataCoord.enabled }}
+  * data-coordinator
+  {{- end }}
+  {{- if .Values.indexCoord.enabled }}
+  * index-coordinator
+  {{- end }}
+  {{- if .Values.queryCoord.enabled }}
+  * query-coordinator
+  {{- end }}
+  {{- if .Values.rootCoord.enabled }}
+  * root-coordinator
+  {{- end }}
+  {{- if .Values.dataNode.enabled }}
+  * data-node
+  {{- end }}
+  {{- if .Values.indexNode.enabled }}
+  * index-node
+  {{- end }}
+  {{- if .Values.queryNode.enabled }}
+  * query-node
+  {{- end }}
+
+{{- if .Values.attu.enabled }}
+Attu:
+{{- if .Values.attu.ingress.enabled }}
+
+1. Get the Attu URL and associate the gateway hostname to your cluster external IP:
+
+   export CLUSTER_IP=$(minikube ip) # On Minikube.
Use: `kubectl cluster-info` on other K8s clusters
+   echo "Attu URL: http{{ if .Values.attu.ingress.tls }}s{{ end }}://{{ .Values.attu.ingress.hostname }}/"
+   echo "$CLUSTER_IP {{ .Values.attu.ingress.hostname }}" | sudo tee -a /etc/hosts
+{{- else }}
+
+1. Get the gateway URL by running these commands:
+
+{{- if contains "NodePort" .Values.attu.service.type }}
+   export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "milvus.attu.fullname" . }})
+   export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.attu.service.type }}
+   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+         You can watch its status by running 'kubectl get --namespace {{ include "common.names.namespace" . }} svc -w {{ template "milvus.attu.fullname" . }}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "milvus.attu.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   echo http://$SERVICE_IP:{{ .Values.attu.service.ports.http }}
+{{- else if contains "ClusterIP" .Values.attu.service.type }}
+   echo "Attu is available at http://127.0.0.1:{{ .Values.attu.service.ports.http }}"
+   kubectl port-forward svc/{{ template "milvus.attu.fullname" . }} {{ .Values.attu.service.ports.http }}:{{ .Values.attu.service.ports.http }} &
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if .Values.proxy.enabled }}
+Proxy:
+
+1. Get the gateway URL by running these commands:
+
+{{- if contains "NodePort" .Values.proxy.service.type }}
+   export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "milvus.proxy.fullname" .
}})
+   export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}")
+   echo grpc://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.proxy.service.type }}
+   NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+         You can watch its status by running 'kubectl get --namespace {{ include "common.names.namespace" . }} svc -w {{ template "milvus.proxy.fullname" . }}'
+   export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ template "milvus.proxy.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+   echo grpc://$SERVICE_IP:{{ .Values.proxy.service.ports.grpc }}
+{{- else if contains "ClusterIP" .Values.proxy.service.type }}
+   echo "Proxy is available at grpc://127.0.0.1:{{ .Values.proxy.service.ports.grpc }}"
+   kubectl port-forward svc/{{ template "milvus.proxy.fullname" . }} {{ .Values.proxy.service.ports.grpc }}:{{ .Values.proxy.service.ports.grpc }} &
+{{- end }}
+{{- end }}
+
+{{- if .Values.milvus.auth.enabled }}
+2. Access Milvus with the following credentials:
+
+   echo Username: {{ .Values.milvus.auth.username }}
+   echo Password: $(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "milvus.secretName" . }} -o jsonpath="{.data.password}" | base64 -d)
+{{- end }}
+
+Check the status of the pods by running this command:
+
+  kubectl get pods --namespace {{ include "common.names.namespace" . }} -l app.kubernetes.io/instance={{ .Release.Name }}
+
+Check the upstream Milvus documentation: https://milvus.io/docs
+
+{{- include "milvus.checkRollingTags" . }}
+{{- include "milvus.validateValues" . }}
+
+{{- end }}
diff --git a/bitnami/milvus/templates/_helpers.tpl b/bitnami/milvus/templates/_helpers.tpl
new file mode 100644
index 0000000000..b3e1bdcf1d
--- /dev/null
+++ b/bitnami/milvus/templates/_helpers.tpl
@@ -0,0 +1,1065 @@
+{{/*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* +Return the proper Milvus image name +*/}} +{{- define "milvus.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.milvus.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Attu image name +*/}} +{{- define "milvus.attu.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.attu.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Init job image name +*/}} +{{- define "milvus.init-job.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.initJob.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Wait container image name +*/}} +{{- define "milvus.wait-container.image" -}} +{{- include "common.images.image" (dict "imageRoot" .Values.waitContainer.image "global" .Values.global) -}} +{{- end -}} + +{{/* +Return the proper Milvus Data Coordinator fullname +*/}} +{{- define "milvus.data-coordinator.fullname" -}} +{{- printf "%s-%s" (include "common.names.fullname" .) "data-coordinator" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Name of the Milvus Data Coordinator service account to use +*/}} +{{- define "milvus.data-coordinator.serviceAccountName" -}} +{{- if .Values.dataCoord.serviceAccount.create -}} + {{ default (printf "%s" (include "milvus.data-coordinator.fullname" .)) .Values.dataCoord.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.dataCoord.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration configmap for Milvus Data Coordinator +*/}} +{{- define "milvus.data-coordinator.configmapName" -}} +{{- if .Values.dataCoord.existingConfigMap -}} + {{- .Values.dataCoord.existingConfigMap -}} +{{- else }} + {{- include "milvus.data-coordinator.fullname" . 
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Data Coordinator
+*/}}
+{{- define "milvus.data-coordinator.extraConfigmapName" -}}
+{{- if .Values.dataCoord.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.data-coordinator.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Index Coordinator fullname
+*/}}
+{{- define "milvus.index-coordinator.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) "index-coordinator" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Index Coordinator service account to use
+*/}}
+{{- define "milvus.index-coordinator.serviceAccountName" -}}
+{{- if .Values.indexCoord.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.index-coordinator.fullname" .)) .Values.indexCoord.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.indexCoord.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Index Coordinator
+*/}}
+{{- define "milvus.index-coordinator.configmapName" -}}
+{{- if .Values.indexCoord.existingConfigMap -}}
+    {{- .Values.indexCoord.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.index-coordinator.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Index Coordinator
+*/}}
+{{- define "milvus.index-coordinator.extraConfigmapName" -}}
+{{- if .Values.indexCoord.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.index-coordinator.fullname" .)
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Query Coordinator fullname
+*/}}
+{{- define "milvus.query-coordinator.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) "query-coordinator" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Query Coordinator service account to use
+*/}}
+{{- define "milvus.query-coordinator.serviceAccountName" -}}
+{{- if .Values.queryCoord.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.query-coordinator.fullname" .)) .Values.queryCoord.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.queryCoord.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Query Coordinator
+*/}}
+{{- define "milvus.query-coordinator.configmapName" -}}
+{{- if .Values.queryCoord.existingConfigMap -}}
+    {{- .Values.queryCoord.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.query-coordinator.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Query Coordinator
+*/}}
+{{- define "milvus.query-coordinator.extraConfigmapName" -}}
+{{- if .Values.queryCoord.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.queryCoord.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.query-coordinator.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Root Coordinator fullname
+*/}}
+{{- define "milvus.root-coordinator.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .)
"root-coordinator" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Root Coordinator service account to use
+*/}}
+{{- define "milvus.root-coordinator.serviceAccountName" -}}
+{{- if .Values.rootCoord.serviceAccount.create -}}
+    {{- default (printf "%s" (include "milvus.root-coordinator.fullname" .)) .Values.rootCoord.serviceAccount.name -}}
+{{- else -}}
+    {{- default "default" .Values.rootCoord.serviceAccount.name -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Root Coordinator
+*/}}
+{{- define "milvus.root-coordinator.configmapName" -}}
+{{- if .Values.rootCoord.existingConfigMap -}}
+    {{- .Values.rootCoord.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.root-coordinator.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Root Coordinator
+*/}}
+{{- define "milvus.root-coordinator.extraConfigmapName" -}}
+{{- if .Values.rootCoord.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.rootCoord.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.root-coordinator.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Data Node fullname
+*/}}
+{{- define "milvus.data-node.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .)
"data-node" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Data Node service account to use
+*/}}
+{{- define "milvus.data-node.serviceAccountName" -}}
+{{- if .Values.dataNode.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.data-node.fullname" .)) .Values.dataNode.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.dataNode.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Data Node
+*/}}
+{{- define "milvus.data-node.configmapName" -}}
+{{- if .Values.dataNode.existingConfigMap -}}
+    {{- .Values.dataNode.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.data-node.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Data Node
+*/}}
+{{- define "milvus.data-node.extraConfigmapName" -}}
+{{- if .Values.dataNode.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.data-node.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Index node fullname
+*/}}
+{{- define "milvus.index-node.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .)
"index-node" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Index Node service account to use
+*/}}
+{{- define "milvus.index-node.serviceAccountName" -}}
+{{- if .Values.indexNode.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.index-node.fullname" .)) .Values.indexNode.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.indexNode.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Index Node
+*/}}
+{{- define "milvus.index-node.configmapName" -}}
+{{- if .Values.indexNode.existingConfigMap -}}
+    {{- .Values.indexNode.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.index-node.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Index Node
+*/}}
+{{- define "milvus.index-node.extraConfigmapName" -}}
+{{- if .Values.indexNode.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.index-node.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Query Node fullname
+*/}}
+{{- define "milvus.query-node.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .)
"query-node" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Query Node service account to use
+*/}}
+{{- define "milvus.query-node.serviceAccountName" -}}
+{{- if .Values.queryNode.serviceAccount.create -}}
+    {{- default (printf "%s" (include "milvus.query-node.fullname" .)) .Values.queryNode.serviceAccount.name -}}
+{{- else -}}
+    {{- default "default" .Values.queryNode.serviceAccount.name -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Query Node
+*/}}
+{{- define "milvus.query-node.configmapName" -}}
+{{- if .Values.queryNode.existingConfigMap -}}
+    {{- .Values.queryNode.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.query-node.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Query Node
+*/}}
+{{- define "milvus.query-node.extraConfigmapName" -}}
+{{- if .Values.queryNode.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.queryNode.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.query-node.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Milvus Proxy fullname
+*/}}
+{{- define "milvus.proxy.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) "proxy" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Milvus Proxy service account to use
+*/}}
+{{- define "milvus.proxy.serviceAccountName" -}}
+{{- if .Values.proxy.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.proxy.fullname" .)) .Values.proxy.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.proxy.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the configuration configmap for Milvus Proxy
+*/}}
+{{- define "milvus.proxy.configmapName" -}}
+{{- if .Values.proxy.existingConfigMap -}}
+    {{- .Values.proxy.existingConfigMap -}}
+{{- else }}
+    {{- include "milvus.proxy.fullname" .
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the extra configuration configmap for Milvus Proxy
+*/}}
+{{- define "milvus.proxy.extraConfigmapName" -}}
+{{- if .Values.proxy.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.proxy.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "milvus.proxy.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Attu fullname
+*/}}
+{{- define "milvus.attu.fullname" -}}
+{{- printf "%s-%s" (include "common.names.fullname" .) "attu" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Name of the Attu service account to use
+*/}}
+{{- define "milvus.attu.serviceAccountName" -}}
+{{- if .Values.attu.serviceAccount.create -}}
+    {{ default (printf "%s" (include "milvus.attu.fullname" .)) .Values.attu.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.attu.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Return the proper Docker Image Registry Secret Names
+NOTE(review): only the milvus and waitContainer images are covered here; the attu and
+initJob images are not — confirm whether their pull secrets should be added too.
+*/}}
+{{- define "milvus.imagePullSecrets" -}}
+{{- include "common.images.pullSecrets" (dict "images" (list .Values.milvus.image .Values.waitContainer.image) "global" .Values.global) -}}
+{{- end -}}
+
+{{/*
+Get the credentials secret
+*/}}
+{{- define "milvus.secretName" -}}
+{{- if .Values.milvus.auth.existingSecret -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.milvus.auth.existingSecret "context" $) -}}
+{{- else }}
+    {{- include "common.names.fullname" .
-}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the secret password key
+*/}}
+{{- define "milvus.secretPasswordKey" -}}
+{{- if and .Values.milvus.auth.existingSecret .Values.milvus.auth.existingSecretPasswordKey -}}
+    {{- print .Values.milvus.auth.existingSecretPasswordKey -}}
+{{- else }}
+    {{- print "password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the secret root password key
+NOTE(review): this reuses existingSecretPasswordKey for the root key — confirm a
+dedicated existingSecretRootPasswordKey value isn't intended here.
+*/}}
+{{- define "milvus.secretRootPasswordKey" -}}
+{{- if and .Values.milvus.auth.existingSecret .Values.milvus.auth.existingSecretPasswordKey -}}
+    {{- print .Values.milvus.auth.existingSecretPasswordKey -}}
+{{- else }}
+    {{- print "root-password" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the common configuration configmap.
+*/}}
+{{- define "milvus.configmapName" -}}
+{{- if .Values.milvus.existingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.milvus.existingConfigMap "context" $) -}}
+{{- else }}
+    {{- include "common.names.fullname" . -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the common extra configuration configmap.
+*/}}
+{{- define "milvus.extraConfigmapName" -}}
+{{- if .Values.milvus.extraConfigExistingConfigMap -}}
+    {{- include "common.tplvalues.render" (dict "value" .Values.milvus.extraConfigExistingConfigMap "context" $) -}}
+{{- else -}}
+    {{- printf "%s-extra" (include "common.names.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Create a default fully qualified app name for etcd
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+*/}} +{{- define "milvus.etcd.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "etcd" "chartValues" .Values.etcd "context" $) -}} +{{- end -}} + +{{/* +Return etcd port +*/}} +{{- define "milvus.etcd.port" -}} +{{- if .Values.etcd.enabled -}} + {{/* We are using the headless service so we need to use the container port */}} + {{- print .Values.etcd.containerPorts.client -}} +{{- else -}} + {{- print .Values.externalEtcd.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return the etcd headless service name +*/}} +{{- define "milvus.etcd.headlessServiceName" -}} +{{- printf "%s-headless" (include "milvus.etcd.fullname" .) -}} +{{- end -}} + +{{/* +Create a default fully qualified app name for kafka +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "milvus.kafka.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "kafka" "chartValues" .Values.kafka "context" $) -}} +{{- end -}} + +{{/* +Return true if Kafka is used by Milvus +*/}} +{{- define "milvus.kafka.deployed" -}} + {{- if or .Values.kafka.enabled .Values.externalKafka.servers -}} + {{- true -}} + {{- end -}} +{{- end -}} + +{{/* +Return kafka port +*/}} +{{- define "milvus.kafka.port" -}} +{{- if .Values.kafka.enabled -}} + {{- print .Values.kafka.service.ports.client -}} +{{- else -}} + {{- print .Values.externalKafka.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return the kafka headless service name +*/}} +{{- define "milvus.kafka.headlessServiceName" -}} +{{- printf "%s-headless" (include "milvus.kafka.fullname" .) 
-}} +{{- end -}} + +{{/* +Return true if kafka authentication is enabled +*/}} +{{- define "milvus.kafka.authEnabled" }} +{{- if .Values.kafka.enabled -}} + {{- if contains "sasl" .Values.kafka.auth.clientProtocol -}} + {{- true -}} + {{- end -}} +{{- else if .Values.externalKafka.servers -}} + {{- if or .Values.externalKafka.existingSecret .Values.externalKafka.password -}} + {{- true -}} + {{- end -}} +{{- end }} +{{- end }} + +{{/* +Return Kafka authentication SASL mechanisms +*/}} +{{- define "milvus.kafka.saslMechanisms" }} +{{- if .Values.kafka.enabled -}} + {{- upper .Values.kafka.auth.sasl.mechanisms -}} +{{- else -}} + {{- .Values.externalKafka.saslMechanisms -}} +{{- end }} +{{- end }} + +{{/* +Return Kafka security protocol +*/}} +{{- define "milvus.kafka.securityProtocol" }} +{{- if .Values.kafka.enabled -}} + {{- if eq .Values.kafka.auth.clientProtocol "sasl" -}} + {{- print "SASL_PLAINTEXT" -}} + {{- else -}} + {{- print .Values.kafka.auth.clientProtocol -}} + {{- end -}} +{{- else -}} + {{- print .Values.externalKafka.securityProtocol -}} +{{- end }} +{{- end }} + +{{/* +Return kafka credential secret name +*/}} +{{- define "milvus.kafka.secretName" -}} +{{- if .Values.kafka.enabled -}} + {{- printf "%s-jaas" (include "milvus.kafka.fullname" .) -}} +{{- else if .Values.externalKafka.existingSecret -}} + {{- print .Values.externalKafka.existingSecret -}} +{{- else -}} + {{- printf "%s-external-kafka" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return kafka secret password key +*/}} +{{- define "milvus.kafka.secretPasswordKey" -}} +{{- if .Values.kafka.enabled -}} + {{- print "system-user-password" -}} +{{- else -}} + {{- print .Values.externalKafka.existingSecretPasswordKey -}} +{{- end -}} +{{- end -}} + +{{/* +Return kafka username +*/}} +{{- define "milvus.kafka.user" -}} +{{- if .Values.kafka.enabled -}} + {{- print (index .Values.kafka.auth.sasl.jaas.clientUsers 0) -}} +{{- else -}} + {{- print .Values.externalKafka.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return MinIO(TM) fullname +*/}} +{{- define "milvus.minio.fullname" -}} +{{- include "common.names.dependency.fullname" (dict "chartName" "minio" "chartValues" .Values.minio "context" $) -}} +{{- end -}} + +{{/* +Return the S3 backend host +*/}} +{{- define "milvus.s3.host" -}} + {{- if .Values.minio.enabled -}} + {{- include "milvus.minio.fullname" . -}} + {{- else -}} + {{- print .Values.externalS3.host -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 bucket +*/}} +{{- define "milvus.s3.bucket" -}} + {{- if .Values.minio.enabled -}} + {{- print .Values.minio.defaultBuckets -}} + {{- else -}} + {{- print .Values.externalS3.bucket -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 protocol +*/}} +{{- define "milvus.s3.protocol" -}} + {{- if .Values.minio.enabled -}} + {{- ternary "https" "http" .Values.minio.tls.enabled -}} + {{- else -}} + {{- print .Values.externalS3.protocol -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 root path +*/}} +{{- define "milvus.s3.rootPath" -}} + {{- if .Values.minio.enabled -}} + {{- print "file" -}} + {{- else -}} + {{- print .Values.externalS3.rootPath -}} + {{- end -}} +{{- end -}} + +{{/* +Return true if IAM is used (this is for cloud providers) +*/}} +{{- define "milvus.s3.useIAM" -}} + {{- if .Values.minio.enabled -}} + {{- print "false" -}} + {{- else -}} + {{- print .Values.externalS3.useIAM -}} + {{- end -}} +{{- end -}} + 
+{{/* +Return true if TLS is used +*/}} +{{- define "milvus.s3.useSSL" -}} + {{- if .Values.minio.enabled -}} + {{- .Values.minio.tls.enabled -}} + {{- else if (eq .Values.externalS3.protocol "https") -}} + {{- print "true" -}} + {{- else -}} + {{- print "false" -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 port +*/}} +{{- define "milvus.s3.port" -}} +{{- ternary .Values.minio.service.ports.api .Values.externalS3.port .Values.minio.enabled -}} +{{- end -}} + +{{/* +Return the S3 credentials secret name +*/}} +{{- define "milvus.s3.secretName" -}} +{{- if .Values.minio.enabled -}} + {{- if .Values.minio.auth.existingSecret -}} + {{- print .Values.minio.auth.existingSecret -}} + {{- else -}} + {{- print (include "milvus.minio.fullname" .) -}} + {{- end -}} +{{- else if .Values.externalS3.existingSecret -}} + {{- print .Values.externalS3.existingSecret -}} +{{- else -}} + {{- printf "%s-%s" (include "common.names.fullname" .) "externals3" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the S3 access key id inside the secret +*/}} +{{- define "milvus.s3.accessKeyIDKey" -}} + {{- if .Values.minio.enabled -}} + {{- print "root-user" -}} + {{- else -}} + {{- print .Values.externalS3.existingSecretAccessKeyIDKey -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 secret access key inside the secret +*/}} +{{- define "milvus.s3.secretAccessKeyKey" -}} + {{- if .Values.minio.enabled -}} + {{- print "root-password" -}} + {{- else -}} + {{- print .Values.externalS3.existingSecretKeySecretKey -}} + {{- end -}} +{{- end -}} + +{{/* +Return the S3 secret access key inside the secret +*/}} +{{- define "milvus.s3.deployed" -}} + {{- if or .Values.minio.enabled .Values.externalS3.host -}} + {{- true -}} + {{- end -}} +{{- end -}} + +{{/* +Init container definition for waiting for the database to be ready +*/}} +{{- define "milvus.waitForETCDInitContainer" -}} +- name: wait-for-etcd + image: {{ template "milvus.wait-container.image" . 
}} + imagePullPolicy: {{ .Values.waitContainer.image.pullPolicy }} + {{- if .Values.waitContainer.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.waitContainer.containerSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + command: + - bash + - -ec + - | + #!/bin/bash + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + etcd_hosts=( + {{- if .Values.etcd.enabled }} + "{{ ternary "https" "http" $.Values.etcd.auth.client.secureTransport }}://{{ printf "%s:%v" (include "milvus.etcd.fullname" $ ) (include "milvus.etcd.port" $ ) }}" + {{- else }} + {{- range $node :=.Values.externalEtcd.servers }} + "{{ ternary "https" "http" $.Values.externalEtcd.secureTransport }}://{{ printf "%s:%v" $node (include "milvus.etcd.port" $) }}" + {{- end }} + {{- end }} + ) + + check_etcd() { + local -r etcd_host="${1:-?missing etcd}" + if curl --max-time 5 "${etcd_host}/version" | grep etcdcluster; then + return 0 + else + return 1 + fi + } + + for host in "${etcd_hosts[@]}"; do + echo "Checking connection to $host" + if retry_while "check_etcd $host"; then + echo "Connected to $host" + else + echo "Error connecting to $host" + exit 1 + fi + done + + echo "Connection success" + exit 0 +{{- end -}} + +{{/* +Init container definition for waiting for the database to be ready +*/}} +{{- define "milvus.waitForS3InitContainer" -}} +- name: wait-for-s3 + image: {{ template "milvus.wait-container.image" . 
}} + imagePullPolicy: {{ .Values.waitContainer.image.pullPolicy }} + {{- if .Values.waitContainer.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.waitContainer.containerSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + command: + - bash + - -ec + - | + #!/bin/bash + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + check_s3() { + local -r s3_host="${1:-?missing s3}" + if curl --max-time 5 "${s3_host}" | grep "RequestId"; then + return 0 + else + return 1 + fi + } + + host={{ include "milvus.s3.host" . | quote }} + + echo "Checking connection to $host" + if retry_while "check_s3 $host"; then + echo "Connected to $host" + else + echo "Error connecting to $host" + exit 1 + fi + + echo "Connection success" + exit 0 +{{- end -}} + +{{/* +Init container definition for waiting for the database to be ready +*/}} +{{- define "milvus.waitForKafkaInitContainer" -}} +- name: wait-for-kafka + image: {{ template "milvus.image" . 
}} {{/* Bitnami shell does not have wait-for-port */}} + imagePullPolicy: {{ .Values.waitContainer.image.pullPolicy }} + {{- if .Values.waitContainer.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.waitContainer.containerSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + command: + - bash + - -ec + - | + #!/bin/bash + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + kafka_hosts=( + {{- if .Values.kafka.enabled }} + {{ include "milvus.kafka.fullname" . | quote }} + {{- else }} + {{- range $node :=.Values.externalKafka.servers }} + {{ print $node | quote }} + {{- end }} + {{- end }} + ) + + check_kafka() { + local -r kafka_host="${1:-?missing kafka}" + if wait-for-port --timeout=5 --host=${kafka_host} --state=inuse {{ include "milvus.kafka.port" . }}; then + return 0 + else + return 1 + fi + } + + for host in "${kafka_hosts[@]}"; do + echo "Checking connection to $host" + if retry_while "check_kafka $host"; then + echo "Connected to $host" + else + echo "Error connecting to $host" + exit 1 + fi + done + + echo "Connection success" + exit 0 +{{- end -}} + +{{/* +Init container definition for waiting for the database to be ready +*/}} +{{- define "milvus.waitForProxyInitContainer" -}} +- name: wait-for-proxy + image: {{ template "milvus.image" . 
}} {{/* Bitnami shell does not have wait-for-port */}} + imagePullPolicy: {{ .Values.waitContainer.image.pullPolicy }} + {{- if .Values.waitContainer.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.waitContainer.containerSecurityContext "enabled" | toYaml | nindent 4 }} + {{- end }} + command: + - bash + - -ec + - | + #!/bin/bash + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + + check_proxy() { + local -r proxy_host="${1:-?missing proxy}" + if wait-for-port --timeout=5 --host=${proxy_host} --state=inuse {{ .Values.proxy.service.ports.grpc }}; then + return 0 + else + return 1 + fi + } + + host={{ include "milvus.proxy.fullname" . | quote }} + + echo "Checking connection to $host" + if retry_while "check_proxy $host"; then + echo "Connected to $host" + else + echo "Error connecting to $host" + exit 1 + fi + + echo "Connection success" + exit 0 +{{- end -}} + + +{{/* +Init container definition for waiting for the database to be ready +*/}} +{{- define "milvus.prepareMilvusInitContainer" -}} +# This init container renders and merges the Milvus configuration files. 
+# We need to use a volume because we're working with ReadOnlyRootFilesystem
+- name: prepare-milvus
+  image: {{ template "milvus.image" .context }}
+  imagePullPolicy: {{ .context.Values.milvus.image.pullPolicy }}
+  {{- $block := index .context.Values .component }}
+  {{- if $block.containerSecurityContext.enabled }}
+  securityContext: {{- omit $block.containerSecurityContext "enabled" | toYaml | nindent 4 }}
+  {{- end }}
+  command:
+    - bash
+    - -ec
+    - |
+      #!/bin/bash
+      # Build final milvus.yaml with the sections of the different files
+      find /bitnami/milvus/conf -type f -name '*.yaml' -print0 | sort -z | xargs -0 yq eval-all '. as $item ireduce ({}; . * $item )' /opt/bitnami/milvus/configs/milvus.yaml > /bitnami/milvus/rendered-conf/pre-render-config_00.yaml
+      {{- if (include "milvus.kafka.deployed" .context) }}
+      # HACK: In order to enable Kafka we need to remove all Pulsar settings from the configuration file
+      # https://github.com/milvus-io/milvus/blob/master/configs/milvus.yaml#L110
+      yq 'del(.pulsar)' /bitnami/milvus/rendered-conf/pre-render-config_00.yaml > /bitnami/milvus/rendered-conf/pre-render-config_01.yaml
+      {{- end }}
+      render-template "$(ls /bitnami/milvus/rendered-conf/pre-render-config_*.yaml | sort | tail -n 1)" > /bitnami/milvus/rendered-conf/milvus.yaml
+      rm /bitnami/milvus/rendered-conf/pre-render-config*
+      chmod 644 /bitnami/milvus/rendered-conf/milvus.yaml
+  env:
+    - name: BITNAMI_DEBUG
+      value: {{ ternary "true" "false" (or .context.Values.milvus.image.debug .context.Values.diagnosticMode.enabled) | quote }}
+    {{- if and (include "milvus.kafka.deployed" .context) (include "milvus.kafka.authEnabled" .context) }}
+    - name: MILVUS_KAFKA_PASSWORD
+      valueFrom:
+        secretKeyRef:
+          name: {{ include "milvus.kafka.secretName" .context }}
+          key: {{ include "milvus.kafka.secretPasswordKey" .context }}
+    {{- end }}
+    {{- if and (include "milvus.s3.deployed" .context) }}
+    - name: MILVUS_S3_ACCESS_ID
+      valueFrom:
+        secretKeyRef:
+          name: {{ include "milvus.s3.secretName" .context }}
+          
key: {{ include "milvus.s3.accessKeyIDKey" .context }}
+    - name: MILVUS_S3_SECRET_ACCESS_KEY
+      valueFrom:
+        secretKeyRef:
+          name: {{ include "milvus.s3.secretName" .context }}
+          key: {{ include "milvus.s3.secretAccessKeyKey" .context }}
+    {{- end }}
+  {{- if $block.extraEnvVars }}
+  {{- include "common.tplvalues.render" (dict "value" $block.extraEnvVars "context" .context) | nindent 4 }}
+  {{- end }}
+  envFrom:
+    {{- if $block.extraEnvVarsCM }}
+    - configMapRef:
+        name: {{ include "common.tplvalues.render" (dict "value" $block.extraEnvVarsCM "context" .context) }}
+    {{- end }}
+    {{- if $block.extraEnvVarsSecret }}
+    - secretRef:
+        name: {{ include "common.tplvalues.render" (dict "value" $block.extraEnvVarsSecret "context" .context) }}
+    {{- end }}
+  volumeMounts:
+    - name: config-common
+      mountPath: /bitnami/milvus/conf/00_default
+    {{- if or .context.Values.milvus.extraConfig .context.Values.milvus.extraConfigExistingConfigMap }}
+    - name: extra-config-common
+      mountPath: /bitnami/milvus/conf/01_extra_common
+    {{- end }}
+    - name: component-config-default
+      mountPath: /bitnami/milvus/conf/02_component_default
+    {{- if or $block.extraConfig $block.extraConfigExistingConfigMap }}
+    - name: component-extra-config
+      mountPath: /bitnami/milvus/conf/03_extra
+    {{- end }}
+    - name: tmp
+      mountPath: /tmp
+    - name: rendered-config
+      mountPath: /bitnami/milvus/rendered-conf/
+{{- end -}}
+
+{{/*
+Return true if the init job should be created
+*/}}
+{{- define "milvus.init-job.create" -}}
+{{- if or (and .Values.milvus.auth.enabled .Release.IsInstall) .Values.initJob.forceRun -}}
+    {{- true -}}
+{{- else -}}
+    {{/* Do not return anything */}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Check if there are rolling tags in the images
+*/}}
+{{- define "milvus.checkRollingTags" -}}
+{{- include "common.warnings.rollingTag" .Values.milvus.image }}
+{{- include "common.warnings.rollingTag" .Values.attu.image }}
+{{- include "common.warnings.rollingTag" .Values.initJob.image }}
+{{- end -}}
+
+{{/*
+Compile all warnings into a single message. +*/}} +{{- define "milvus.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "milvus.validateValues.controllers" .) -}} +{{- $messages := append $messages (include "milvus.validateValues.attu" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Function to validate the controller deployment +*/}} +{{- define "milvus.validateValues.controllers" -}} +{{- if not (or .Values.dataCoord.enabled .Values.rootCoord.enabled .Values.indexCoord.enabled .Values.queryCoord.enabled .Values.dataNode.enabled .Values.queryNode.enabled .Values.indexNode.enabled) -}} +milvus: Missing controllers. At least one controller should be enabled. +{{- end -}} +{{- end -}} + +{{/* +Function to validate the controller deployment +*/}} +{{- define "milvus.validateValues.attu" -}} +{{- if and .Values.attu.enabled (not .Values.proxy.enabled) -}} +attu: Attu requires the Milvus proxy to be enabled +{{- end -}} +{{- end -}} diff --git a/bitnami/milvus/templates/attu/deployment.yaml b/bitnami/milvus/templates/attu/deployment.yaml new file mode 100644 index 0000000000..7a4520fb1d --- /dev/null +++ b/bitnami/milvus/templates/attu/deployment.yaml @@ -0,0 +1,179 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "milvus.attu.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.attu.autoscaling.hpa.enabled }} + replicas: {{ .Values.attu.replicaCount }} + {{- end }} + {{- if .Values.attu.updateStrategy }} + strategy: {{- toYaml .Values.attu.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + template: + metadata: + annotations: + {{- if .Values.attu.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.attu.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "milvus.attu.serviceAccountName" . }} + {{- include "milvus.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.attu.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.attu.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.attu.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.attu.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.attu.podAffinityPreset "component" "attu" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.attu.podAntiAffinityPreset "component" "attu" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.attu.nodeAffinityPreset.type "key" .Values.attu.nodeAffinityPreset.key "values" .Values.attu.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.attu.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.attu.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.attu.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.attu.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.attu.runtimeClassName }} + runtimeClassName: {{ .Values.attu.runtimeClassName | quote }} + {{- end }} + {{- if .Values.attu.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.attu.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.attu.priorityClassName }} + priorityClassName: {{ .Values.attu.priorityClassName | quote }} + {{- end }} + {{- if .Values.attu.schedulerName }} + schedulerName: {{ .Values.attu.schedulerName }} + {{- end }} + {{- if .Values.attu.podSecurityContext.enabled }} + securityContext: {{- omit .Values.attu.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.attu.enableDefaultInitContainers }} + {{- include "milvus.waitForProxyInitContainer" . | nindent 8 }} + {{- end }} + {{- if .Values.attu.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: attu + image: {{ template "milvus.attu.image" . }} + imagePullPolicy: {{ .Values.attu.image.pullPolicy }} + {{- if .Values.attu.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.attu.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.attu.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.attu.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.attu.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.attu.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: MILVUS_URL + value: {{ printf "http://%s:%v" (include "milvus.proxy.fullname" .) 
.Values.proxy.service.ports.grpc }} + {{- if .Values.attu.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.attu.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.attu.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.attu.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.attu.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - containerPort: {{ .Values.attu.containerPorts.http }} + name: http + {{- if .Values.attu.resources }} + resources: {{- toYaml .Values.attu.resources | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.attu.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.attu.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.attu.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.attu.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http + {{- end }} + {{- if .Values.attu.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.attu.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.attu.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.attu.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: / + port: http + {{- end }} + {{- if .Values.attu.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.attu.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.attu.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.attu.startupProbe "enabled") "context" $) | nindent 12 
}} + tcpSocket: + port: http + {{- end }} + {{- end }} + {{- if .Values.attu.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.attu.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: tmp + mountPath: /tmp + - name: tmp-npm + mountPath: /.npm + - name: tmp-yarn + mountPath: /.yarn + - name: tmp-yarn-cache + mountPath: /.cache/yarn + {{- if .Values.attu.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.attu.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: tmp-yarn + emptyDir: {} + - name: tmp-yarn-cache + emptyDir: {} + - name: tmp-npm + emptyDir: {} + - name: tmp + emptyDir: {} + {{- if .Values.attu.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/hpa.yaml b/bitnami/milvus/templates/attu/hpa.yaml new file mode 100644 index 0000000000..74d4b1ceab --- /dev/null +++ b/bitnami/milvus/templates/attu/hpa.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "milvus.attu.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.attu.autoscaling.hpa.annotations }} + annotations: + {{- if .Values.attu.autoscaling.hpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.autoscaling.hpa.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "milvus.attu.fullname" . }} + minReplicas: {{ .Values.attu.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.attu.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.attu.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.attu.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.attu.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.attu.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.attu.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.attu.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/ingress-tls-secret.yaml b/bitnami/milvus/templates/attu/ingress-tls-secret.yaml new file mode 100644 index 0000000000..44e15b778e --- /dev/null +++ b/bitnami/milvus/templates/attu/ingress-tls-secret.yaml @@ -0,0 +1,54 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.ingress.enabled }} +{{- if .Values.attu.ingress.secrets }} +{{- range .Values.attu.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ include "common.names.namespace" $ | quote }} + labels: {{- include "common.labels.standard" $ | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if $.Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" $.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if $.Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" $.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +--- +{{- end }} +{{- end }} +{{- if and .Values.attu.ingress.tls .Values.attu.ingress.selfSigned }} +{{- $secretName := printf "%s-tls" .Values.attu.ingress.hostname }} +{{- $ca := genCA "server-ca" 365 }} +{{- $cert := genSignedCert .Values.attu.ingress.hostname nil (list .Values.attu.ingress.hostname) 365 $ca }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ $secretName }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: kubernetes.io/tls +data: + tls.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.crt" "defaultValue" $cert.Cert "context" $) }} + tls.key: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "tls.key" "defaultValue" $cert.Key "context" $) }} + ca.crt: {{ include "common.secrets.lookup" (dict "secret" $secretName "key" "ca.crt" "defaultValue" $ca.Cert "context" $) }} +{{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/ingress.yaml b/bitnami/milvus/templates/attu/ingress.yaml new file mode 100644 index 0000000000..199329bcfb --- /dev/null +++ b/bitnami/milvus/templates/attu/ingress.yaml @@ -0,0 +1,69 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.attu.enabled .Values.attu.ingress.enabled }} +apiVersion: {{ include "common.capabilities.ingress.apiVersion" . }} +kind: Ingress +metadata: + name: {{ template "milvus.attu.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.attu.ingress.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.attu.ingress.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.ingress.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if and .Values.attu.ingress.ingressClassName (eq "true" (include "common.ingress.supportsIngressClassname" .)) }} + ingressClassName: {{ .Values.attu.ingress.ingressClassName | quote }} + {{- end }} + rules: + {{- if .Values.attu.ingress.hostname }} + - host: {{ .Values.attu.ingress.hostname }} + http: + paths: + {{- if .Values.attu.ingress.extraPaths }} + {{- toYaml .Values.attu.ingress.extraPaths | nindent 10 }} + {{- end }} + - path: {{ .Values.attu.ingress.path }} + {{- if eq "true" (include "common.ingress.supportsPathType" .) }} + pathType: {{ .Values.attu.ingress.pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "milvus.attu.fullname" . 
| trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- range .Values.attu.ingress.extraHosts }} + - host: {{ .name | quote }} + http: + paths: + - path: {{ default "/" .path }} + {{- if eq "true" (include "common.ingress.supportsPathType" $) }} + pathType: {{ default "ImplementationSpecific" .pathType }} + {{- end }} + backend: {{- include "common.ingress.backend" (dict "serviceName" (include "milvus.attu.fullname" $ | trunc 63 | trimSuffix "-") "servicePort" "http" "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.attu.ingress.extraRules }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.ingress.extraRules "context" $) | nindent 4 }} + {{- end }} + {{- if or (and .Values.attu.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.attu.ingress.annotations )) .Values.attu.ingress.selfSigned)) .Values.attu.ingress.extraTls }} + tls: + {{- if and .Values.attu.ingress.tls (or (include "common.ingress.certManagerRequest" ( dict "annotations" .Values.attu.ingress.annotations )) .Values.attu.ingress.selfSigned) }} + - hosts: + - {{ .Values.attu.ingress.hostname | quote }} + secretName: {{ printf "%s-tls" .Values.attu.ingress.hostname }} + {{- end }} + {{- if .Values.attu.ingress.extraTls }} + {{- include "common.tplvalues.render" (dict "value" .Values.attu.ingress.extraTls "context" $) | nindent 4 }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/networkpolicy.yaml b/bitnami/milvus/templates/attu/networkpolicy.yaml new file mode 100644 index 0000000000..4e01cd38f1 --- /dev/null +++ b/bitnami/milvus/templates/attu/networkpolicy.yaml @@ -0,0 +1,85 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "milvus.attu.fullname" . 
}} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{ include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.attu.service.ports.http }} + - port: {{ .Values.queryCoord.service.ports.grpc }} + - port: {{ .Values.dataCoord.service.ports.grpc }} + - port: {{ .Values.indexCoord.service.ports.grpc }} + - port: {{ .Values.rootCoord.service.ports.grpc }} + - port: {{ .Values.queryNode.service.ports.grpc }} + - port: {{ .Values.dataNode.service.ports.grpc }} + - port: {{ .Values.indexNode.service.ports.grpc }} + - port: {{ .Values.proxy.service.ports.grpc }} + - port: {{ include "milvus.etcd.port" . }} + - port: {{ include "milvus.s3.port" . }} + - port: {{ include "milvus.kafka.port" . }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . 
| nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.attu.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.attu.service.ports.http }} + {{- if not .Values.attu.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . }}-client: "true" + {{- end }} + {{- if .Values.attu.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.attu.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.attu.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.attu.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.attu.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/pdb.yaml b/bitnami/milvus/templates/attu/pdb.yaml new file mode 100644 index 0000000000..bd08d62c99 --- /dev/null +++ b/bitnami/milvus/templates/attu/pdb.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "milvus.attu.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.attu.pdb.minAvailable }} + minAvailable: {{ .Values.attu.pdb.minAvailable }} + {{- end }} + {{- if .Values.attu.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.attu.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu +{{- end }} diff --git a/bitnami/milvus/templates/attu/service-account.yaml b/bitnami/milvus/templates/attu/service-account.yaml new file mode 100644 index 0000000000..4af8a1b2a9 --- /dev/null +++ b/bitnami/milvus/templates/attu/service-account.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "milvus.attu.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: attu + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.attu.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.attu.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.attu.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/bitnami/milvus/templates/attu/service.yaml b/bitnami/milvus/templates/attu/service.yaml new file mode 100644 index 0000000000..3972f80835 --- /dev/null +++ b/bitnami/milvus/templates/attu/service.yaml @@ -0,0 +1,63 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.attu.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "milvus.attu.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }}
+    app.kubernetes.io/part-of: milvus
+    app.kubernetes.io/component: attu
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.commonAnnotations .Values.attu.service.annotations }}
+  annotations:
+    {{- if .Values.attu.service.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.attu.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.attu.service.type }}
+  {{- if .Values.attu.service.sessionAffinity }}
+  sessionAffinity: {{ .Values.attu.service.sessionAffinity }}
+  {{- end }}
+  {{- if .Values.attu.service.sessionAffinityConfig }}
+  sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.attu.service.sessionAffinityConfig "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if .Values.attu.service.clusterIP }}
+  clusterIP: {{ .Values.attu.service.clusterIP }}
+  {{- end }}
+  {{- if (or (eq .Values.attu.service.type "LoadBalancer") (eq .Values.attu.service.type "NodePort")) }}
+  externalTrafficPolicy: {{ .Values.attu.service.externalTrafficPolicy | quote }}
+  {{- end }}
+  {{- if eq .Values.attu.service.type "LoadBalancer" }}
+  loadBalancerSourceRanges: {{ .Values.attu.service.loadBalancerSourceRanges | toJson }}
+  {{- end }}
+  {{- if (and (eq .Values.attu.service.type "LoadBalancer") (not (empty .Values.attu.service.loadBalancerIP))) }}
+  loadBalancerIP: {{ .Values.attu.service.loadBalancerIP }}
+  {{- end }}
+  ports:
+    - name: http
+      port: {{ .Values.attu.service.ports.http }}
+      targetPort: http
+      protocol: TCP
+      {{- if (and (or (eq .Values.attu.service.type "NodePort") (eq .Values.attu.service.type "LoadBalancer")) (not (empty .Values.attu.service.nodePorts.http))) }}
+      nodePort: {{ 
.Values.attu.service.nodePorts.http }}
+      {{- else if eq .Values.attu.service.type "ClusterIP" }}
+      nodePort: null
+      {{- end }}
+  {{- if .Values.attu.service.extraPorts }}
+  {{- include "common.tplvalues.render" (dict "value" .Values.attu.service.extraPorts "context" $) | nindent 4 }}
+  {{- end }}
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/part-of: milvus
+    app.kubernetes.io/component: attu
+{{- end }}
diff --git a/bitnami/milvus/templates/attu/vpa.yaml b/bitnami/milvus/templates/attu/vpa.yaml
new file mode 100644
index 0000000000..33c9148182
--- /dev/null
+++ b/bitnami/milvus/templates/attu/vpa.yaml
@@ -0,0 +1,51 @@
+{{- /*
+Copyright VMware, Inc.
+SPDX-License-Identifier: APACHE-2.0
+*/}}
+
+{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.attu.autoscaling.vpa.enabled }}
+apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }}
+kind: VerticalPodAutoscaler
+metadata:
+  name: {{ include "milvus.attu.fullname" . }}
+  namespace: {{ include "common.names.namespace" . | quote }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+    app.kubernetes.io/part-of: milvus
+    app.kubernetes.io/component: attu
+    {{- if .Values.commonLabels }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.attu.autoscaling.vpa.annotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.attu.autoscaling.vpa.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+spec:
+  resourcePolicy:
+    containerPolicies:
+      - containerName: attu
+        {{- with .Values.attu.autoscaling.vpa.controlledResources }}
+        controlledResources:
+          {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.attu.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.attu.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "milvus.attu.fullname" . }} + {{- if .Values.attu.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.attu.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/configmap.yaml b/bitnami/milvus/templates/configmap.yaml new file mode 100644 index 0000000000..a69379ba3d --- /dev/null +++ b/bitnami/milvus/templates/configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.milvus.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "common.names.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 00_milvus_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.milvus.defaultConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/configmap.yaml b/bitnami/milvus/templates/data-coordinator/configmap.yaml new file mode 100644 index 0000000000..c8ed3dce56 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.dataCoord.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 03_data_coordinator_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.defaultConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/deployment.yaml b/bitnami/milvus/templates/data-coordinator/deployment.yaml new file mode 100644 index 0000000000..00afa91d43 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/deployment.yaml @@ -0,0 +1,208 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.dataCoord.autoscaling.hpa.enabled }} + replicas: {{ .Values.dataCoord.replicaCount }} + {{- end }} + {{- if .Values.dataCoord.updateStrategy }} + strategy: {{- toYaml .Values.dataCoord.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + template: + metadata: + annotations: + checksum/common-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/common-config-extra: {{ include (print $.Template.BasePath "/extra-configmap.yaml") . | sha256sum }} + checksum/config-extra: {{ include (print $.Template.BasePath "/data-coordinator/extra-configmap.yaml") . | sha256sum }} + {{- if .Values.dataCoord.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.dataCoord.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "milvus.data-coordinator.serviceAccountName" . }} + {{- include "milvus.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.dataCoord.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataCoord.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dataCoord.podAffinityPreset "component" "data-coordinator" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dataCoord.podAntiAffinityPreset "component" "data-coordinator" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.dataCoord.nodeAffinityPreset.type "key" .Values.dataCoord.nodeAffinityPreset.key "values" .Values.dataCoord.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.dataCoord.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataCoord.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.dataCoord.runtimeClassName }} + runtimeClassName: {{ .Values.dataCoord.runtimeClassName | quote }} + {{- end }} + {{- if .Values.dataCoord.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.dataCoord.priorityClassName }} + priorityClassName: {{ .Values.dataCoord.priorityClassName | quote }} + {{- end }} + {{- if .Values.dataCoord.schedulerName }} + schedulerName: {{ .Values.dataCoord.schedulerName }} + {{- end }} + {{- if .Values.dataCoord.podSecurityContext.enabled }} + securityContext: {{- omit .Values.dataCoord.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.dataCoord.enableDefaultInitContainers }} + {{- include "milvus.waitForETCDInitContainer" . | nindent 8 }} + {{- include "milvus.waitForKafkaInitContainer" . | nindent 8 }} + {{- include "milvus.waitForS3InitContainer" . | nindent 8 }} + {{- include "milvus.prepareMilvusInitContainer" (dict "component" "dataCoord" "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataCoord.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: milvus + image: {{ template "milvus.image" . 
}} + imagePullPolicy: {{ .Values.milvus.image.pullPolicy }} + {{- if .Values.dataCoord.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.dataCoord.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.dataCoord.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.dataCoord.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.args "context" $) | nindent 12 }} + {{- else }} + args: + - run + - datacoord + {{- end }} + env: + - name: METRICS_PORT + value: {{ .Values.dataCoord.containerPorts.metrics | quote }} + {{- if .Values.dataCoord.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.dataCoord.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.dataCoord.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - containerPort: {{ .Values.dataCoord.containerPorts.grpc }} + name: grpc + - containerPort: {{ .Values.dataCoord.containerPorts.metrics }} + name: http-metrics + {{- if .Values.dataCoord.resources }} + resources: {{- toYaml .Values.dataCoord.resources | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.dataCoord.customLivenessProbe }} + 
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.dataCoord.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataCoord.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.dataCoord.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.dataCoord.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataCoord.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.dataCoord.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.dataCoord.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataCoord.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- end }} + {{- if .Values.dataCoord.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: rendered-config + mountPath: /opt/bitnami/milvus/configs + - name: tmp + mountPath: /tmp + - name: tmp-milvus + mountPath: /opt/bitnami/milvus/tmp + # We are using a s3 backend, so this data dir is temporary + - name: tmp-data-milvus + mountPath: /bitnami/milvus/data + {{- if .Values.dataCoord.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dataCoord.sidecars }} 
+ {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: tmp-data-milvus + emptyDir: {} + - name: tmp-milvus + emptyDir: {} + - name: tmp + emptyDir: {} + - name: config-common + configMap: + name: {{ template "milvus.configmapName" . }} + {{- if or .Values.milvus.extraConfig .Values.milvus.extraConfigExistingConfigMap }} + - name: extra-config-common + configMap: + name: {{ template "milvus.extraConfigmapName" . }} + {{- end }} + - name: component-config-default + configMap: + name: {{ template "milvus.data-coordinator.configmapName" . }} + {{- if or .Values.dataCoord.extraConfig .Values.dataCoord.extraConfigExistingConfigMap }} + - name: component-extra-config + configMap: + name: {{ template "milvus.data-coordinator.extraConfigmapName" . }} + {{- end }} + - name: rendered-config + emptyDir: {} + {{- if .Values.dataCoord.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/extra-configmap.yaml b/bitnami/milvus/templates/data-coordinator/extra-configmap.yaml new file mode 100644 index 0000000000..dd35e6e701 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/extra-configmap.yaml @@ -0,0 +1,24 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dataCoord.extraConfig (not .Values.dataCoord.extraConfigExistingConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "milvus.data-coordinator.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 04_data_coordinator_extra.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.extraConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/hpa.yaml b/bitnami/milvus/templates/data-coordinator/hpa.yaml new file mode 100644 index 0000000000..bbefbe7a72 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/hpa.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataCoord.autoscaling.hpa.annotations }} + annotations: + {{- if .Values.dataCoord.autoscaling.hpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.autoscaling.hpa.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "milvus.data-coordinator.fullname" . }} + minReplicas: {{ .Values.dataCoord.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.dataCoord.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.dataCoord.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.dataCoord.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.dataCoord.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.dataCoord.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.dataCoord.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.dataCoord.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/networkpolicy.yaml b/bitnami/milvus/templates/data-coordinator/networkpolicy.yaml new file mode 100644 index 0000000000..6f18ca7313 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/networkpolicy.yaml @@ -0,0 +1,91 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods and backends (etcd, S3, Kafka) + - ports: + - port: {{ .Values.queryCoord.service.ports.grpc }} + - port: {{ .Values.dataCoord.service.ports.grpc }} + - port: {{ .Values.indexCoord.service.ports.grpc }} + - port: {{ .Values.rootCoord.service.ports.grpc }} + - port: {{ .Values.queryNode.service.ports.grpc }} + - port: {{ .Values.dataNode.service.ports.grpc }} + - port: {{ .Values.indexNode.service.ports.grpc }} + - port: {{ .Values.proxy.service.ports.grpc }} + - port: {{ include "milvus.etcd.port" . }} + - port: {{ include "milvus.s3.port" . }} + - port: {{ include "milvus.kafka.port" . }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . | nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.dataCoord.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.dataCoord.service.ports.grpc }} + {{- if not .Values.dataCoord.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" .
}}-client: "true" + {{- end }} + {{- if .Values.dataCoord.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.dataCoord.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.dataCoord.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.dataCoord.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.dataCoord.metrics.enabled }} + - ports: + - port: {{ .Values.dataCoord.service.ports.metrics }} + {{- end }} + {{- if .Values.dataCoord.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/pdb.yaml b/bitnami/milvus/templates/data-coordinator/pdb.yaml new file mode 100644 index 0000000000..7d278415da --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/pdb.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.dataCoord.pdb.minAvailable }} + minAvailable: {{ .Values.dataCoord.pdb.minAvailable }} + {{- end }} + {{- if .Values.dataCoord.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.dataCoord.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/service-account.yaml b/bitnami/milvus/templates/data-coordinator/service-account.yaml new file mode 100644 index 0000000000..9301a2877c --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/service-account.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "milvus.data-coordinator.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataCoord.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.dataCoord.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/service.yaml b/bitnami/milvus/templates/data-coordinator/service.yaml new file mode 100644 index 0000000000..0af0196fac --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/service.yaml @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataCoord.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataCoord.service.annotations }} + annotations: + {{- if .Values.dataCoord.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.dataCoord.metrics.enabled .Values.dataCoord.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.dataCoord.service.type }} + {{- if .Values.dataCoord.service.sessionAffinity }} + sessionAffinity: {{ .Values.dataCoord.service.sessionAffinity }} + {{- end }} + {{- if .Values.dataCoord.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.dataCoord.service.clusterIP }} + clusterIP: {{ .Values.dataCoord.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.dataCoord.service.type "LoadBalancer") (eq .Values.dataCoord.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.dataCoord.service.externalTrafficPolicy | quote }} + {{- end }} + {{- /* Render the list via toYaml: interpolating the slice directly would emit Go's space-separated "[a b]" form, which YAML reads as a single scalar */}} + {{- if and (eq .Values.dataCoord.service.type "LoadBalancer") .Values.dataCoord.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.dataCoord.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- if (and (eq .Values.dataCoord.service.type "LoadBalancer") (not (empty .Values.dataCoord.service.loadBalancerIP))) }} + loadBalancerIP: {{
.Values.dataCoord.service.loadBalancerIP }} + {{- end }} + ports: + - name: grpc + port: {{ .Values.dataCoord.service.ports.grpc }} + targetPort: grpc + protocol: TCP + {{- if (and (or (eq .Values.dataCoord.service.type "NodePort") (eq .Values.dataCoord.service.type "LoadBalancer")) (not (empty .Values.dataCoord.service.nodePorts.grpc))) }} + nodePort: {{ .Values.dataCoord.service.nodePorts.grpc }} + {{- else if eq .Values.dataCoord.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.dataCoord.metrics.enabled }} + - name: http-metrics + port: {{ .Values.dataCoord.service.ports.metrics }} + targetPort: http-metrics + protocol: TCP + {{- if (and (or (eq .Values.dataCoord.service.type "NodePort") (eq .Values.dataCoord.service.type "LoadBalancer")) (not (empty .Values.dataCoord.service.nodePorts.metrics))) }} + nodePort: {{ .Values.dataCoord.service.nodePorts.metrics }} + {{- else if eq .Values.dataCoord.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.dataCoord.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/servicemonitor.yaml b/bitnami/milvus/templates/data-coordinator/servicemonitor.yaml new file mode 100644 index 0000000000..f6e7773d88 --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dataCoord.enabled .Values.dataCoord.metrics.enabled .Values.dataCoord.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "milvus.data-coordinator.fullname" . 
}} + namespace: {{ default (include "common.names.namespace" .) .Values.dataCoord.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataCoord.metrics.serviceMonitor.annotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .Values.dataCoord.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.dataCoord.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.dataCoord.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataCoord.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + endpoints: + - port: http-metrics + {{- if .Values.dataCoord.metrics.serviceMonitor.interval }} + interval: {{ .Values.dataCoord.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.dataCoord.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.dataCoord.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.dataCoord.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.dataCoord.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.dataCoord.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/data-coordinator/vpa.yaml b/bitnami/milvus/templates/data-coordinator/vpa.yaml new file mode 100644 index 0000000000..a7ec41bc6b --- /dev/null +++ b/bitnami/milvus/templates/data-coordinator/vpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.dataCoord.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "milvus.data-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataCoord.autoscaling.vpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataCoord.autoscaling.vpa.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: milvus-data-coordinator + {{- with .Values.dataCoord.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dataCoord.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dataCoord.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "milvus.data-coordinator.fullname" . }} + {{- if .Values.dataCoord.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.dataCoord.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/configmap.yaml b/bitnami/milvus/templates/data-node/configmap.yaml new file mode 100644 index 0000000000..a5e778dafd --- /dev/null +++ b/bitnami/milvus/templates/data-node/configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.dataNode.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 03_data_node_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.defaultConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/deployment.yaml b/bitnami/milvus/templates/data-node/deployment.yaml new file mode 100644 index 0000000000..3da58d42de --- /dev/null +++ b/bitnami/milvus/templates/data-node/deployment.yaml @@ -0,0 +1,208 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.dataNode.autoscaling.hpa.enabled }} + replicas: {{ .Values.dataNode.replicaCount }} + {{- end }} + {{- if .Values.dataNode.updateStrategy }} + strategy: {{- toYaml .Values.dataNode.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + template: + metadata: + annotations: + checksum/common-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/common-config-extra: {{ include (print $.Template.BasePath "/extra-configmap.yaml") . | sha256sum }} + checksum/config-extra: {{ include (print $.Template.BasePath "/data-node/extra-configmap.yaml") . | sha256sum }} + {{- if .Values.dataNode.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.dataNode.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "milvus.data-node.serviceAccountName" . }} + {{- include "milvus.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.dataNode.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataNode.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dataNode.podAffinityPreset "component" "data-node" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.dataNode.podAntiAffinityPreset "component" "data-node" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.dataNode.nodeAffinityPreset.type "key" .Values.dataNode.nodeAffinityPreset.key "values" .Values.dataNode.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.dataNode.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataNode.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.dataNode.runtimeClassName }} + runtimeClassName: {{ .Values.dataNode.runtimeClassName | quote }} + {{- end }} + {{- if .Values.dataNode.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.dataNode.priorityClassName }} + priorityClassName: {{ .Values.dataNode.priorityClassName | quote }} + {{- end }} + {{- if .Values.dataNode.schedulerName }} + schedulerName: {{ .Values.dataNode.schedulerName }} + {{- end }} + {{- if .Values.dataNode.podSecurityContext.enabled }} + securityContext: {{- omit .Values.dataNode.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.dataNode.enableDefaultInitContainers }} + {{- include "milvus.waitForETCDInitContainer" . | nindent 8 }} + {{- include "milvus.waitForKafkaInitContainer" . | nindent 8 }} + {{- include "milvus.waitForS3InitContainer" . | nindent 8 }} + {{- include "milvus.prepareMilvusInitContainer" (dict "component" "dataNode" "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.dataNode.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: milvus + image: {{ template "milvus.image" . 
}} + imagePullPolicy: {{ .Values.milvus.image.pullPolicy }} + {{- if .Values.dataNode.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.dataNode.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.dataNode.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.dataNode.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.args "context" $) | nindent 12 }} + {{- else }} + args: + - run + - datanode + {{- end }} + env: + - name: METRICS_PORT + value: {{ .Values.dataNode.containerPorts.metrics | quote }} + {{- if .Values.dataNode.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.dataNode.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dataNode.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.dataNode.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.dataNode.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - containerPort: {{ .Values.dataNode.containerPorts.grpc }} + name: grpc + - containerPort: {{ .Values.dataNode.containerPorts.metrics }} + name: http-metrics + {{- if .Values.dataNode.resources }} + resources: {{- toYaml .Values.dataNode.resources | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.dataNode.customLivenessProbe }} + livenessProbe: {{- include 
"common.tplvalues.render" (dict "value" .Values.dataNode.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.dataNode.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataNode.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.dataNode.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.dataNode.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataNode.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.dataNode.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.dataNode.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.dataNode.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- end }} + {{- if .Values.dataNode.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: rendered-config + mountPath: /opt/bitnami/milvus/configs + - name: tmp + mountPath: /tmp + - name: tmp-milvus + mountPath: /opt/bitnami/milvus/tmp + # We are using a s3 backend, so this data dir is temporary + - name: tmp-data-milvus + mountPath: /bitnami/milvus/data + {{- if .Values.dataNode.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.dataNode.sidecars }} + {{- include "common.tplvalues.render" ( 
dict "value" .Values.dataNode.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: tmp-data-milvus + emptyDir: {} + - name: tmp-milvus + emptyDir: {} + - name: tmp + emptyDir: {} + - name: config-common + configMap: + name: {{ template "milvus.configmapName" . }} + {{- if or .Values.milvus.extraConfig .Values.milvus.extraConfigExistingConfigMap }} + - name: extra-config-common + configMap: + name: {{ template "milvus.extraConfigmapName" . }} + {{- end }} + - name: component-config-default + configMap: + name: {{ template "milvus.data-node.configmapName" . }} + {{- if or .Values.dataNode.extraConfig .Values.dataNode.extraConfigExistingConfigMap }} + - name: component-extra-config + configMap: + name: {{ template "milvus.data-node.extraConfigmapName" . }} + {{- end }} + - name: rendered-config + emptyDir: {} + {{- if .Values.dataNode.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/extra-configmap.yaml b/bitnami/milvus/templates/data-node/extra-configmap.yaml new file mode 100644 index 0000000000..b840618704 --- /dev/null +++ b/bitnami/milvus/templates/data-node/extra-configmap.yaml @@ -0,0 +1,24 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dataNode.extraConfig (not .Values.dataNode.extraConfigExistingConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "milvus.data-node.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 04_data_node_extra.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.extraConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/hpa.yaml b/bitnami/milvus/templates/data-node/hpa.yaml new file mode 100644 index 0000000000..6739c8c833 --- /dev/null +++ b/bitnami/milvus/templates/data-node/hpa.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataNode.autoscaling.hpa.annotations }} + annotations: + {{- if .Values.dataNode.autoscaling.hpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.autoscaling.hpa.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "milvus.data-node.fullname" . }} + minReplicas: {{ .Values.dataNode.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.dataNode.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.dataNode.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.dataNode.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.dataNode.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.dataNode.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.dataNode.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.dataNode.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/networkpolicy.yaml b/bitnami/milvus/templates/data-node/networkpolicy.yaml new file mode 100644 index 0000000000..d5d014c104 --- /dev/null +++ b/bitnami/milvus/templates/data-node/networkpolicy.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.queryCoord.service.ports.grpc }} + - port: {{ .Values.dataCoord.service.ports.grpc }} + - port: {{ .Values.indexCoord.service.ports.grpc }} + - port: {{ .Values.rootCoord.service.ports.grpc }} + - port: {{ .Values.queryNode.service.ports.grpc }} + - port: {{ .Values.dataNode.service.ports.grpc }} + - port: {{ .Values.indexNode.service.ports.grpc }} + - port: {{ .Values.proxy.service.ports.grpc }} + - port: {{ include "milvus.etcd.port" . }} + - port: {{ include "milvus.s3.port" . }} + - port: {{ include "milvus.kafka.port" . }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . | nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.dataNode.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.dataNode.service.ports.grpc }} + {{- if not .Values.dataNode.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.dataNode.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.dataNode.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.dataNode.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.dataNode.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.dataNode.metrics.enabled }} + - ports: + - port: {{ .Values.dataNode.service.ports.metrics }} + {{- end }} + {{- if .Values.dataNode.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/pdb.yaml b/bitnami/milvus/templates/data-node/pdb.yaml new file mode 100644 index 0000000000..fe4e948c06 --- /dev/null +++ b/bitnami/milvus/templates/data-node/pdb.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.dataNode.pdb.minAvailable }} + minAvailable: {{ .Values.dataNode.pdb.minAvailable }} + {{- end }} + {{- if .Values.dataNode.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.dataNode.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node +{{- end }} diff --git a/bitnami/milvus/templates/data-node/service-account.yaml b/bitnami/milvus/templates/data-node/service-account.yaml new file mode 100644 index 0000000000..fa87a1b30e --- /dev/null +++ b/bitnami/milvus/templates/data-node/service-account.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "milvus.data-node.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataNode.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.dataNode.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/service.yaml b/bitnami/milvus/templates/data-node/service.yaml new file mode 100644 index 0000000000..6e9eb3b1cb --- /dev/null +++ b/bitnami/milvus/templates/data-node/service.yaml @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.dataNode.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataNode.service.annotations }} + annotations: + {{- if .Values.dataNode.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.dataNode.metrics.enabled .Values.dataNode.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.dataNode.service.type }} + {{- if .Values.dataNode.service.sessionAffinity }} + sessionAffinity: {{ .Values.dataNode.service.sessionAffinity }} + {{- end }} + {{- if .Values.dataNode.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.dataNode.service.clusterIP }} + clusterIP: {{ .Values.dataNode.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.dataNode.service.type "LoadBalancer") (eq .Values.dataNode.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.dataNode.service.externalTrafficPolicy | quote }} + {{- end }} + {{ if eq .Values.dataNode.service.type "LoadBalancer" }} + loadBalancerSourceRanges: {{ .Values.dataNode.service.loadBalancerSourceRanges }} + {{ end }} + {{- if (and (eq .Values.dataNode.service.type "LoadBalancer") (not (empty .Values.dataNode.service.loadBalancerIP))) }} + loadBalancerIP: {{ .Values.dataNode.service.loadBalancerIP }} + {{- end }} 
+ ports: + - name: grpc + port: {{ .Values.dataNode.service.ports.grpc }} + targetPort: grpc + protocol: TCP + {{- if (and (or (eq .Values.dataNode.service.type "NodePort") (eq .Values.dataNode.service.type "LoadBalancer")) (not (empty .Values.dataNode.service.nodePorts.grpc))) }} + nodePort: {{ .Values.dataNode.service.nodePorts.grpc }} + {{- else if eq .Values.dataNode.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.dataNode.metrics.enabled }} + - name: http-metrics + port: {{ .Values.dataNode.service.ports.metrics }} + targetPort: http-metrics + protocol: TCP + {{- if (and (or (eq .Values.dataNode.service.type "NodePort") (eq .Values.dataNode.service.type "LoadBalancer")) (not (empty .Values.dataNode.service.nodePorts.metrics))) }} + nodePort: {{ .Values.dataNode.service.nodePorts.metrics }} + {{- else if eq .Values.dataNode.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.dataNode.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node +{{- end }} diff --git a/bitnami/milvus/templates/data-node/servicemonitor.yaml b/bitnami/milvus/templates/data-node/servicemonitor.yaml new file mode 100644 index 0000000000..80ca2fb58e --- /dev/null +++ b/bitnami/milvus/templates/data-node/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.dataNode.enabled .Values.dataNode.metrics.enabled .Values.dataNode.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "milvus.data-node.fullname" . }} + namespace: {{ default (include "common.names.namespace" .) 
.Values.dataNode.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.dataNode.metrics.serviceMonitor.annotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .Values.dataNode.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.dataNode.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.dataNode.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.dataNode.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + endpoints: + - port: http-metrics + {{- if .Values.dataNode.metrics.serviceMonitor.interval }} + interval: {{ .Values.dataNode.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.dataNode.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.dataNode.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.dataNode.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.dataNode.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.dataNode.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/data-node/vpa.yaml b/bitnami/milvus/templates/data-node/vpa.yaml new file mode 100644 index 0000000000..eda8976825 --- /dev/null +++ b/bitnami/milvus/templates/data-node/vpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.dataNode.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "milvus.data-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: data-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.dataNode.autoscaling.vpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.dataNode.autoscaling.vpa.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: milvus-data-node + {{- with .Values.dataNode.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dataNode.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.dataNode.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "milvus.data-node.fullname" . }} + {{- if .Values.dataNode.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.dataNode.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/externaletcd-secret.yaml b/bitnami/milvus/templates/externaletcd-secret.yaml new file mode 100644 index 0000000000..8f2f22119e --- /dev/null +++ b/bitnami/milvus/templates/externaletcd-secret.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (not .Values.etcd.enabled) (not .Values.externalEtcd.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-external-etcd" (include "common.names.fullname" .) 
| trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + etcd-root-password: {{ .Values.externalEtcd.password | b64enc | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/externalkafka-secret.yaml b/bitnami/milvus/templates/externalkafka-secret.yaml new file mode 100644 index 0000000000..7d49c7c035 --- /dev/null +++ b/bitnami/milvus/templates/externalkafka-secret.yaml @@ -0,0 +1,23 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (not .Values.kafka.enabled) (not .Values.externalKafka.existingSecret) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-external-kafka" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + kafka-root-password: {{ .Values.externalKafka.password | b64enc | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/externals3-secret.yaml b/bitnami/milvus/templates/externals3-secret.yaml new file mode 100644 index 0000000000..8f4c0b5a9b --- /dev/null +++ b/bitnami/milvus/templates/externals3-secret.yaml @@ -0,0 +1,24 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (not .Values.minio.enabled) (not .Values.externalS3.existingSecret) .Values.enableS3 }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ printf "%s-externals3" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.commonLabels "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" $) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{ .Values.externalS3.existingSecretAccessKeyIDKey }}: {{ .Values.externalS3.accessKeyID | b64enc | quote }} + {{ .Values.externalS3.existingSecretKeySecretKey }}: {{ .Values.externalS3.accessKeySecret | b64enc | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/extra-configmap.yaml b/bitnami/milvus/templates/extra-configmap.yaml new file mode 100644 index 0000000000..b918d38802 --- /dev/null +++ b/bitnami/milvus/templates/extra-configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.milvus.extraConfig (not .Values.milvus.extraConfigExistingConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 00_milvus_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.milvus.extraConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/extra-list.yaml b/bitnami/milvus/templates/extra-list.yaml new file mode 100644 index 0000000000..42caa402ca --- /dev/null +++ b/bitnami/milvus/templates/extra-list.yaml @@ -0,0 +1,9 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- range .Values.extraDeploy }} +--- +{{ include "common.tplvalues.render" (dict "value" . "context" $) }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/configmap.yaml b/bitnami/milvus/templates/index-coordinator/configmap.yaml new file mode 100644 index 0000000000..cdc07e53b3 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.indexCoord.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 03_index_coordinator_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.defaultConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/deployment.yaml b/bitnami/milvus/templates/index-coordinator/deployment.yaml new file mode 100644 index 0000000000..9f69202e9f --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/deployment.yaml @@ -0,0 +1,208 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.indexCoord.autoscaling.hpa.enabled }} + replicas: {{ .Values.indexCoord.replicaCount }} + {{- end }} + {{- if .Values.indexCoord.updateStrategy }} + strategy: {{- toYaml .Values.indexCoord.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + template: + metadata: + annotations: + checksum/common-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/common-config-extra: {{ include (print $.Template.BasePath "/extra-configmap.yaml") . | sha256sum }} + checksum/config-extra: {{ include (print $.Template.BasePath "/index-coordinator/extra-configmap.yaml") . | sha256sum }} + {{- if .Values.indexCoord.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.indexCoord.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "milvus.index-coordinator.serviceAccountName" . }} + {{- include "milvus.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.indexCoord.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexCoord.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.indexCoord.podAffinityPreset "component" "index-coordinator" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.indexCoord.podAntiAffinityPreset "component" "index-coordinator" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.indexCoord.nodeAffinityPreset.type "key" .Values.indexCoord.nodeAffinityPreset.key "values" .Values.indexCoord.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.indexCoord.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexCoord.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.indexCoord.runtimeClassName }} + runtimeClassName: {{ .Values.indexCoord.runtimeClassName | quote }} + {{- end }} + {{- if .Values.indexCoord.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.indexCoord.priorityClassName }} + priorityClassName: {{ .Values.indexCoord.priorityClassName | quote }} + {{- end }} + {{- if .Values.indexCoord.schedulerName }} + schedulerName: {{ .Values.indexCoord.schedulerName }} + {{- end }} + {{- if .Values.indexCoord.podSecurityContext.enabled }} + securityContext: {{- omit .Values.indexCoord.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.indexCoord.enableDefaultInitContainers }} + {{- include "milvus.waitForETCDInitContainer" . | nindent 8 }} + {{- include "milvus.waitForKafkaInitContainer" . | nindent 8 }} + {{- include "milvus.waitForS3InitContainer" . | nindent 8 }} + {{- include "milvus.prepareMilvusInitContainer" (dict "component" "indexCoord" "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexCoord.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: milvus + image: {{ template "milvus.image" . 
}} + imagePullPolicy: {{ .Values.milvus.image.pullPolicy }} + {{- if .Values.indexCoord.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.indexCoord.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.indexCoord.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.indexCoord.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.args "context" $) | nindent 12 }} + {{- else }} + args: + - run + - indexcoord + {{- end }} + env: + - name: METRICS_PORT + value: {{ .Values.indexCoord.containerPorts.metrics | quote }} + {{- if .Values.indexCoord.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.indexCoord.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.indexCoord.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - containerPort: {{ .Values.indexCoord.containerPorts.grpc }} + name: grpc + - containerPort: {{ .Values.indexCoord.containerPorts.metrics }} + name: http-metrics + {{- if .Values.indexCoord.resources }} + resources: {{- toYaml .Values.indexCoord.resources | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if 
.Values.indexCoord.customLivenessProbe }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.indexCoord.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexCoord.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.indexCoord.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.indexCoord.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexCoord.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.indexCoord.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.indexCoord.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexCoord.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- end }} + {{- if .Values.indexCoord.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: rendered-config + mountPath: /opt/bitnami/milvus/configs + - name: tmp + mountPath: /tmp + - name: tmp-milvus + mountPath: /opt/bitnami/milvus/tmp + # We are using a s3 backend, so this data dir is temporary + - name: tmp-data-milvus + mountPath: /bitnami/milvus/data + {{- if .Values.indexCoord.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraVolumeMounts "context" $) | 
nindent 12 }} + {{- end }} + {{- if .Values.indexCoord.sidecars }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: tmp-data-milvus + emptyDir: {} + - name: tmp-milvus + emptyDir: {} + - name: tmp + emptyDir: {} + - name: config-common + configMap: + name: {{ template "milvus.configmapName" . }} + {{- if or .Values.milvus.extraConfig .Values.milvus.extraConfigExistingConfigMap }} + - name: extra-config-common + configMap: + name: {{ template "milvus.extraConfigmapName" . }} + {{- end }} + - name: component-config-default + configMap: + name: {{ template "milvus.index-coordinator.configmapName" . }} + {{- if or .Values.indexCoord.extraConfig .Values.indexCoord.extraConfigExistingConfigMap }} + - name: component-extra-config + configMap: + name: {{ template "milvus.index-coordinator.extraConfigmapName" . }} + {{- end }} + - name: rendered-config + emptyDir: {} + {{- if .Values.indexCoord.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/extra-configmap.yaml b/bitnami/milvus/templates/index-coordinator/extra-configmap.yaml new file mode 100644 index 0000000000..e02dfce4ca --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/extra-configmap.yaml @@ -0,0 +1,24 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.indexCoord.extraConfig (not .Values.indexCoord.extraConfigExistingConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "milvus.index-coordinator.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 04_index_coordinator_extra.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.extraConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/hpa.yaml b/bitnami/milvus/templates/index-coordinator/hpa.yaml new file mode 100644 index 0000000000..c0e18163d7 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/hpa.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexCoord.autoscaling.hpa.annotations }} + annotations: + {{- if .Values.indexCoord.autoscaling.hpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.autoscaling.hpa.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "milvus.index-coordinator.fullname" . }} + minReplicas: {{ .Values.indexCoord.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.indexCoord.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.indexCoord.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.indexCoord.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.indexCoord.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.indexCoord.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.indexCoord.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.indexCoord.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/networkpolicy.yaml b/bitnami/milvus/templates/index-coordinator/networkpolicy.yaml new file mode 100644 index 0000000000..e381d03c53 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/networkpolicy.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.queryCoord.service.ports.grpc }} + - port: {{ .Values.dataCoord.service.ports.grpc }} + - port: {{ .Values.indexCoord.service.ports.grpc }} + - port: {{ .Values.rootCoord.service.ports.grpc }} + - port: {{ .Values.queryNode.service.ports.grpc }} + - port: {{ .Values.dataNode.service.ports.grpc }} + - port: {{ .Values.indexNode.service.ports.grpc }} + - port: {{ .Values.proxy.service.ports.grpc }} + - port: {{ include "milvus.etcd.port" . }} + - port: {{ include "milvus.s3.port" . }} + - port: {{ include "milvus.kafka.port" . }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . | nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.indexCoord.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.indexCoord.service.ports.grpc }} + {{- if not .Values.indexCoord.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.indexCoord.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.indexCoord.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.indexCoord.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.indexCoord.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.indexCoord.metrics.enabled }} + - ports: + - port: {{ .Values.indexCoord.service.ports.metrics }} + {{- end }} + {{- if .Values.indexCoord.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/pdb.yaml b/bitnami/milvus/templates/index-coordinator/pdb.yaml new file mode 100644 index 0000000000..e797c1e5cd --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/pdb.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.indexCoord.pdb.minAvailable }} + minAvailable: {{ .Values.indexCoord.pdb.minAvailable }} + {{- end }} + {{- if .Values.indexCoord.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.indexCoord.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/service-account.yaml b/bitnami/milvus/templates/index-coordinator/service-account.yaml new file mode 100644 index 0000000000..ac32db7613 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/service-account.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "milvus.index-coordinator.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexCoord.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.indexCoord.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/service.yaml b/bitnami/milvus/templates/index-coordinator/service.yaml new file mode 100644 index 0000000000..a46cd5fa01 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/service.yaml @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexCoord.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexCoord.service.annotations }} + annotations: + {{- if .Values.indexCoord.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.indexCoord.metrics.enabled .Values.indexCoord.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.indexCoord.service.type }} + {{- if .Values.indexCoord.service.sessionAffinity }} + sessionAffinity: {{ .Values.indexCoord.service.sessionAffinity }} + {{- end }} + {{- if .Values.indexCoord.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.indexCoord.service.clusterIP }} + clusterIP: {{ .Values.indexCoord.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.indexCoord.service.type "LoadBalancer") (eq .Values.indexCoord.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.indexCoord.service.externalTrafficPolicy | quote }} + {{- end }} + {{ if eq .Values.indexCoord.service.type "LoadBalancer" }} + loadBalancerSourceRanges: {{ .Values.indexCoord.service.loadBalancerSourceRanges }} + {{ end }} + {{- if (and (eq .Values.indexCoord.service.type "LoadBalancer") (not (empty .Values.indexCoord.service.loadBalancerIP))) }} + loadBalancerIP: {{ 
.Values.indexCoord.service.loadBalancerIP }} + {{- end }} + ports: + - name: grpc + port: {{ .Values.indexCoord.service.ports.grpc }} + targetPort: grpc + protocol: TCP + {{- if (and (or (eq .Values.indexCoord.service.type "NodePort") (eq .Values.indexCoord.service.type "LoadBalancer")) (not (empty .Values.indexCoord.service.nodePorts.grpc))) }} + nodePort: {{ .Values.indexCoord.service.nodePorts.grpc }} + {{- else if eq .Values.indexCoord.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.indexCoord.metrics.enabled }} + - name: http-metrics + port: {{ .Values.indexCoord.service.ports.metrics }} + targetPort: http-metrics + protocol: TCP + {{- if (and (or (eq .Values.indexCoord.service.type "NodePort") (eq .Values.indexCoord.service.type "LoadBalancer")) (not (empty .Values.indexCoord.service.nodePorts.metrics))) }} + nodePort: {{ .Values.indexCoord.service.nodePorts.metrics }} + {{- else if eq .Values.indexCoord.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.indexCoord.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/servicemonitor.yaml b/bitnami/milvus/templates/index-coordinator/servicemonitor.yaml new file mode 100644 index 0000000000..d538770909 --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.indexCoord.enabled .Values.indexCoord.metrics.enabled .Values.indexCoord.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "milvus.index-coordinator.fullname" . 
}} + namespace: {{ default (include "common.names.namespace" .) .Values.indexCoord.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexCoord.metrics.serviceMonitor.annotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .Values.indexCoord.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.indexCoord.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.indexCoord.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexCoord.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + endpoints: + - port: http-metrics + {{- if .Values.indexCoord.metrics.serviceMonitor.interval }} + interval: {{ .Values.indexCoord.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.indexCoord.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.indexCoord.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.indexCoord.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.indexCoord.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.indexCoord.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/index-coordinator/vpa.yaml b/bitnami/milvus/templates/index-coordinator/vpa.yaml new file mode 100644 index 0000000000..2de2ad119e --- /dev/null +++ b/bitnami/milvus/templates/index-coordinator/vpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.indexCoord.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "milvus.index-coordinator.fullname" . }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-coordinator + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexCoord.autoscaling.vpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexCoord.autoscaling.vpa.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: milvus + {{- with .Values.indexCoord.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexCoord.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexCoord.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "milvus.index-coordinator.fullname" . }} + {{- if .Values.indexCoord.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.indexCoord.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/configmap.yaml b/bitnami/milvus/templates/index-node/configmap.yaml new file mode 100644 index 0000000000..642fc00eca --- /dev/null +++ b/bitnami/milvus/templates/index-node/configmap.yaml @@ -0,0 +1,23 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if not .Values.indexNode.existingConfigMap }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "milvus.index-node.fullname" . 
}} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 03_index_node_default.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.defaultConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/deployment.yaml b/bitnami/milvus/templates/index-node/deployment.yaml new file mode 100644 index 0000000000..8e4a039cc8 --- /dev/null +++ b/bitnami/milvus/templates/index-node/deployment.yaml @@ -0,0 +1,208 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.enabled }} +apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} +kind: Deployment +metadata: + name: {{ template "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if not .Values.indexNode.autoscaling.hpa.enabled }} + replicas: {{ .Values.indexNode.replicaCount }} + {{- end }} + {{- if .Values.indexNode.updateStrategy }} + strategy: {{- toYaml .Values.indexNode.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + template: + metadata: + annotations: + checksum/common-config: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/common-config-extra: {{ include (print $.Template.BasePath "/extra-configmap.yaml") . | sha256sum }} + checksum/config-extra: {{ include (print $.Template.BasePath "/index-node/extra-configmap.yaml") . | sha256sum }} + {{- if .Values.indexNode.podAnnotations }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.podAnnotations "context" $) | nindent 8 }} + {{- end }} + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.indexNode.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.podLabels "context" $) | nindent 8 }} + {{- end }} + spec: + serviceAccountName: {{ template "milvus.index-node.serviceAccountName" . }} + {{- include "milvus.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.indexNode.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexNode.affinity }} + affinity: {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.indexNode.podAffinityPreset "component" "index-node" "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.indexNode.podAntiAffinityPreset "component" "index-node" "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.indexNode.nodeAffinityPreset.type "key" .Values.indexNode.nodeAffinityPreset.key "values" .Values.indexNode.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.indexNode.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexNode.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.tolerations "context" .) | nindent 8 }} + {{- end }} + {{- if .Values.indexNode.runtimeClassName }} + runtimeClassName: {{ .Values.indexNode.runtimeClassName | quote }} + {{- end }} + {{- if .Values.indexNode.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.indexNode.priorityClassName }} + priorityClassName: {{ .Values.indexNode.priorityClassName | quote }} + {{- end }} + {{- if .Values.indexNode.schedulerName }} + schedulerName: {{ .Values.indexNode.schedulerName }} + {{- end }} + {{- if .Values.indexNode.podSecurityContext.enabled }} + securityContext: {{- omit .Values.indexNode.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.indexNode.enableDefaultInitContainers }} + {{- include "milvus.waitForETCDInitContainer" . | nindent 8 }} + {{- include "milvus.waitForKafkaInitContainer" . | nindent 8 }} + {{- include "milvus.waitForS3InitContainer" . | nindent 8 }} + {{- include "milvus.prepareMilvusInitContainer" (dict "component" "indexNode" "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.indexNode.initContainers }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.initContainers "context" $) | nindent 8 }} + {{- end }} + containers: + - name: milvus + image: {{ template "milvus.image" . 
}} + imagePullPolicy: {{ .Values.milvus.image.pullPolicy }} + {{- if .Values.indexNode.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.indexNode.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 12 }} + {{- else if .Values.indexNode.command }} + command: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.command "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.diagnosticMode.enabled }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.args "context" $) | nindent 12 }} + {{- else if .Values.indexNode.args }} + args: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.args "context" $) | nindent 12 }} + {{- else }} + args: + - run + - indexnode + {{- end }} + env: + - name: METRICS_PORT + value: {{ .Values.indexNode.containerPorts.metrics | quote }} + {{- if .Values.indexNode.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + envFrom: + {{- if .Values.indexNode.extraEnvVarsCM }} + - configMapRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.indexNode.extraEnvVarsCM "context" $) }} + {{- end }} + {{- if .Values.indexNode.extraEnvVarsSecret }} + - secretRef: + name: {{ include "common.tplvalues.render" (dict "value" .Values.indexNode.extraEnvVarsSecret "context" $) }} + {{- end }} + ports: + - containerPort: {{ .Values.indexNode.containerPorts.grpc }} + name: grpc + - containerPort: {{ .Values.indexNode.containerPorts.metrics }} + name: http-metrics + {{- if .Values.indexNode.resources }} + resources: {{- toYaml .Values.indexNode.resources | nindent 12 }} + {{- end }} + {{- if not .Values.diagnosticMode.enabled }} + {{- if .Values.indexNode.customLivenessProbe }} + 
livenessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.customLivenessProbe "context" $) | nindent 12 }} + {{- else if .Values.indexNode.livenessProbe.enabled }} + livenessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexNode.livenessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.indexNode.customReadinessProbe }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.customReadinessProbe "context" $) | nindent 12 }} + {{- else if .Values.indexNode.readinessProbe.enabled }} + readinessProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexNode.readinessProbe "enabled") "context" $) | nindent 12 }} + httpGet: + path: /healthz + port: http-metrics + {{- end }} + {{- if .Values.indexNode.customStartupProbe }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.customStartupProbe "context" $) | nindent 12 }} + {{- else if .Values.indexNode.startupProbe.enabled }} + startupProbe: {{- include "common.tplvalues.render" (dict "value" (omit .Values.indexNode.startupProbe "enabled") "context" $) | nindent 12 }} + tcpSocket: + port: http-metrics + {{- end }} + {{- end }} + {{- if .Values.indexNode.lifecycleHooks }} + lifecycle: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.lifecycleHooks "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + - name: rendered-config + mountPath: /opt/bitnami/milvus/configs + - name: tmp + mountPath: /tmp + - name: tmp-milvus + mountPath: /opt/bitnami/milvus/tmp + # We are using a s3 backend, so this data dir is temporary + - name: tmp-data-milvus + mountPath: /bitnami/milvus/data + {{- if .Values.indexNode.extraVolumeMounts }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.extraVolumeMounts "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.indexNode.sidecars }} 
+ {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + - name: tmp-data-milvus + emptyDir: {} + - name: tmp-milvus + emptyDir: {} + - name: tmp + emptyDir: {} + - name: config-common + configMap: + name: {{ template "milvus.configmapName" . }} + {{- if or .Values.milvus.extraConfig .Values.milvus.extraConfigExistingConfigMap }} + - name: extra-config-common + configMap: + name: {{ template "milvus.extraConfigmapName" . }} + {{- end }} + - name: component-config-default + configMap: + name: {{ template "milvus.index-node.configmapName" . }} + {{- if or .Values.indexNode.extraConfig .Values.indexNode.extraConfigExistingConfigMap }} + - name: component-extra-config + configMap: + name: {{ template "milvus.index-node.extraConfigmapName" . }} + {{- end }} + - name: rendered-config + emptyDir: {} + {{- if .Values.indexNode.extraVolumes }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.extraVolumes "context" $) | nindent 8 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/extra-configmap.yaml b/bitnami/milvus/templates/index-node/extra-configmap.yaml new file mode 100644 index 0000000000..128109926f --- /dev/null +++ b/bitnami/milvus/templates/index-node/extra-configmap.yaml @@ -0,0 +1,24 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.indexNode.extraConfig (not .Values.indexNode.extraConfigExistingConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ printf "%s-extra" (include "milvus.index-node.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + 04_index_node_extra.yaml: | + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.extraConfig "context" $) | nindent 4 }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/hpa.yaml b/bitnami/milvus/templates/index-node/hpa.yaml new file mode 100644 index 0000000000..2ba0fb1a84 --- /dev/null +++ b/bitnami/milvus/templates/index-node/hpa.yaml @@ -0,0 +1,59 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.autoscaling.hpa.enabled }} +apiVersion: {{ include "common.capabilities.hpa.apiVersion" ( dict "context" $ ) }} +kind: HorizontalPodAutoscaler +metadata: + name: {{ template "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexNode.autoscaling.hpa.annotations }} + annotations: + {{- if .Values.indexNode.autoscaling.hpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.autoscaling.hpa.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + scaleTargetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ template "milvus.index-node.fullname" . }} + minReplicas: {{ .Values.indexNode.autoscaling.hpa.minReplicas }} + maxReplicas: {{ .Values.indexNode.autoscaling.hpa.maxReplicas }} + metrics: + {{- if .Values.indexNode.autoscaling.hpa.targetCPU }} + - type: Resource + resource: + name: cpu + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) }} + targetAverageUtilization: {{ .Values.indexNode.autoscaling.hpa.targetCPU }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.indexNode.autoscaling.hpa.targetCPU }} + {{- end }} + {{- end }} + {{- if .Values.indexNode.autoscaling.hpa.targetMemory }} + - type: Resource + resource: + name: memory + {{- if semverCompare "<1.23-0" (include "common.capabilities.kubeVersion" .) 
}} + targetAverageUtilization: {{ .Values.indexNode.autoscaling.hpa.targetMemory }} + {{- else }} + target: + type: Utilization + averageUtilization: {{ .Values.indexNode.autoscaling.hpa.targetMemory }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/networkpolicy.yaml b/bitnami/milvus/templates/index-node/networkpolicy.yaml new file mode 100644 index 0000000000..69c39125fd --- /dev/null +++ b/bitnami/milvus/templates/index-node/networkpolicy.yaml @@ -0,0 +1,88 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ include "common.capabilities.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + - port: 53 + protocol: TCP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.queryCoord.service.ports.grpc }} + - port: {{ .Values.dataCoord.service.ports.grpc }} + - port: {{ .Values.indexCoord.service.ports.grpc }} + - port: {{ .Values.rootCoord.service.ports.grpc }} + - port: {{ .Values.queryNode.service.ports.grpc }} + - port: {{ .Values.dataNode.service.ports.grpc }} + - port: {{ .Values.indexNode.service.ports.grpc }} + - port: {{ .Values.proxy.service.ports.grpc }} + - port: {{ include "milvus.etcd.port" . }} + - port: {{ include "milvus.s3.port" . }} + - port: {{ include "milvus.kafka.port" . }} + to: + - podSelector: + matchLabels: {{- include "common.labels.standard" . | nindent 14 }} + {{- if .Values.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.podLabels "context" $) | nindent 14 }} + {{- end }} + {{- if .Values.indexNode.networkPolicy.extraEgress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.networkPolicy.extraEgress "context" $ ) | nindent 4 }} + {{- end }} + ingress: + - ports: + - port: {{ .Values.indexNode.service.ports.grpc }} + {{- if not .Values.indexNode.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "common.names.fullname" . 
}}-client: "true" + {{- end }} + {{- if .Values.indexNode.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.indexNode.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.indexNode.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.indexNode.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.indexNode.metrics.enabled }} + - ports: + - port: {{ .Values.indexNode.service.ports.metrics }} + {{- end }} + {{- if .Values.indexNode.networkPolicy.extraIngress }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.networkPolicy.extraIngress "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/pdb.yaml b/bitnami/milvus/templates/index-node/pdb.yaml new file mode 100644 index 0000000000..6c5424f6e5 --- /dev/null +++ b/bitnami/milvus/templates/index-node/pdb.yaml @@ -0,0 +1,32 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.pdb.create }} +apiVersion: {{ include "common.capabilities.policy.apiVersion" . }} +kind: PodDisruptionBudget +metadata: + name: {{ template "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.indexNode.pdb.minAvailable }} + minAvailable: {{ .Values.indexNode.pdb.minAvailable }} + {{- end }} + {{- if .Values.indexNode.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.indexNode.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "common.labels.matchLabels" . | nindent 6 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node +{{- end }} diff --git a/bitnami/milvus/templates/index-node/service-account.yaml b/bitnami/milvus/templates/index-node/service-account.yaml new file mode 100644 index 0000000000..8611b0740a --- /dev/null +++ b/bitnami/milvus/templates/index-node/service-account.yaml @@ -0,0 +1,26 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "milvus.index-node.serviceAccountName" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexNode.serviceAccount.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.serviceAccount.annotations "context" $ ) | nindent 4 }} + {{- end }} +automountServiceAccountToken: {{ .Values.indexNode.serviceAccount.automountServiceAccountToken }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/service.yaml b/bitnami/milvus/templates/index-node/service.yaml new file mode 100644 index 0000000000..a1275dd327 --- /dev/null +++ b/bitnami/milvus/templates/index-node/service.yaml @@ -0,0 +1,77 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if .Values.indexNode.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexNode.service.annotations }} + annotations: + {{- if .Values.indexNode.service.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.service.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if and .Values.indexNode.metrics.enabled .Values.indexNode.metrics.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.metrics.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.indexNode.service.type }} + {{- if .Values.indexNode.service.sessionAffinity }} + sessionAffinity: {{ .Values.indexNode.service.sessionAffinity }} + {{- end }} + {{- if .Values.indexNode.service.sessionAffinityConfig }} + sessionAffinityConfig: {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.service.sessionAffinityConfig "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.indexNode.service.clusterIP }} + clusterIP: {{ .Values.indexNode.service.clusterIP }} + {{- end }} + {{- if (or (eq .Values.indexNode.service.type "LoadBalancer") (eq .Values.indexNode.service.type "NodePort")) }} + externalTrafficPolicy: {{ .Values.indexNode.service.externalTrafficPolicy | quote }} + {{- end }} + {{ if eq .Values.indexNode.service.type "LoadBalancer" }} + loadBalancerSourceRanges: {{ .Values.indexNode.service.loadBalancerSourceRanges }} + {{ end }} + {{- if (and (eq .Values.indexNode.service.type "LoadBalancer") (not (empty .Values.indexNode.service.loadBalancerIP))) }} + loadBalancerIP: {{ 
.Values.indexNode.service.loadBalancerIP }} + {{- end }} + ports: + - name: grpc + port: {{ .Values.indexNode.service.ports.grpc }} + targetPort: grpc + protocol: TCP + {{- if (and (or (eq .Values.indexNode.service.type "NodePort") (eq .Values.indexNode.service.type "LoadBalancer")) (not (empty .Values.indexNode.service.nodePorts.grpc))) }} + nodePort: {{ .Values.indexNode.service.nodePorts.grpc }} + {{- else if eq .Values.indexNode.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if .Values.indexNode.metrics.enabled }} + - name: http-metrics + port: {{ .Values.indexNode.service.ports.metrics }} + targetPort: http-metrics + protocol: TCP + {{- if (and (or (eq .Values.indexNode.service.type "NodePort") (eq .Values.indexNode.service.type "LoadBalancer")) (not (empty .Values.indexNode.service.nodePorts.metrics))) }} + nodePort: {{ .Values.indexNode.service.nodePorts.metrics }} + {{- else if eq .Values.indexNode.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- end }} + {{- if .Values.indexNode.service.extraPorts }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.service.extraPorts "context" $) | nindent 4 }} + {{- end }} + selector: {{- include "common.labels.matchLabels" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node +{{- end }} diff --git a/bitnami/milvus/templates/index-node/servicemonitor.yaml b/bitnami/milvus/templates/index-node/servicemonitor.yaml new file mode 100644 index 0000000000..e573bbae67 --- /dev/null +++ b/bitnami/milvus/templates/index-node/servicemonitor.yaml @@ -0,0 +1,61 @@ +{{/* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.indexNode.enabled .Values.indexNode.metrics.enabled .Values.indexNode.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "milvus.index-node.fullname" . 
}} + namespace: {{ default (include "common.names.namespace" .) .Values.indexNode.metrics.serviceMonitor.namespace | quote }} + labels: {{- include "common.labels.standard" . | nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.labels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.metrics.serviceMonitor.labels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.indexNode.metrics.serviceMonitor.annotations }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.metrics.serviceMonitor.annotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + {{- if .Values.indexNode.metrics.serviceMonitor.jobLabel }} + jobLabel: {{ .Values.indexNode.metrics.serviceMonitor.jobLabel }} + {{- end }} + selector: + matchLabels: {{ include "common.labels.matchLabels" . 
| nindent 6 }} + {{- if .Values.indexNode.metrics.serviceMonitor.selector }} + {{- include "common.tplvalues.render" (dict "value" .Values.indexNode.metrics.serviceMonitor.selector "context" $) | nindent 6 }} + {{- end }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + endpoints: + - port: http-metrics + {{- if .Values.indexNode.metrics.serviceMonitor.interval }} + interval: {{ .Values.indexNode.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.indexNode.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.honorLabels }} + honorLabels: {{ .Values.indexNode.metrics.serviceMonitor.honorLabels }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- toYaml .Values.indexNode.metrics.serviceMonitor.metricRelabelings | nindent 6 }} + {{- end }} + {{- if .Values.indexNode.metrics.serviceMonitor.relabelings }} + relabelings: {{- toYaml .Values.indexNode.metrics.serviceMonitor.relabelings | nindent 6 }} + {{- end }} + namespaceSelector: + matchNames: + - {{ include "common.names.namespace" . | quote }} +{{- end }} diff --git a/bitnami/milvus/templates/index-node/vpa.yaml b/bitnami/milvus/templates/index-node/vpa.yaml new file mode 100644 index 0000000000..00d37b22d0 --- /dev/null +++ b/bitnami/milvus/templates/index-node/vpa.yaml @@ -0,0 +1,51 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and (.Capabilities.APIVersions.Has "autoscaling.k8s.io/v1/VerticalPodAutoscaler") .Values.indexNode.autoscaling.vpa.enabled }} +apiVersion: {{ include "common.capabilities.vpa.apiVersion" . }} +kind: VerticalPodAutoscaler +metadata: + name: {{ include "milvus.index-node.fullname" . }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + app.kubernetes.io/component: index-node + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.indexNode.autoscaling.vpa.annotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.indexNode.autoscaling.vpa.annotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + resourcePolicy: + containerPolicies: + - containerName: milvus + {{- with .Values.indexNode.autoscaling.vpa.controlledResources }} + controlledResources: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexNode.autoscaling.vpa.maxAllowed }} + maxAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.indexNode.autoscaling.vpa.minAllowed }} + minAllowed: + {{- toYaml . | nindent 8 }} + {{- end }} + targetRef: + apiVersion: {{ include "common.capabilities.deployment.apiVersion" . }} + kind: Deployment + name: {{ include "milvus.index-node.fullname" . }} + {{- if .Values.indexNode.autoscaling.vpa.updatePolicy }} + updatePolicy: + {{- with .Values.indexNode.autoscaling.vpa.updatePolicy.updateMode }} + updateMode: {{ . }} + {{- end }} + {{- end }} +{{- end }} diff --git a/bitnami/milvus/templates/init-job.yaml b/bitnami/milvus/templates/init-job.yaml new file mode 100644 index 0000000000..61ff16cfc6 --- /dev/null +++ b/bitnami/milvus/templates/init-job.yaml @@ -0,0 +1,118 @@ +{{- /* +Copyright VMware, Inc. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if (include "milvus.init-job.create" .) }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ include "common.names.fullname" . }}-init + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" . 
| nindent 4 }} + app.kubernetes.io/part-of: milvus + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.initJob.annotations "context" $ ) | nindent 4 }} + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + backoffLimit: {{ .Values.initJob.backoffLimit }} + template: + metadata: + labels: {{- include "common.labels.standard" . | nindent 8 }} + app.kubernetes.io/component: init + {{- if .Values.initJob.podLabels }} + {{- include "common.tplvalues.render" (dict "value" .Values.initJob.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonLabels "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.initJob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.initJob.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "milvus.imagePullSecrets" . | nindent 6 }} + restartPolicy: OnFailure + {{- if .Values.initJob.podSecurityContext.enabled }} + securityContext: {{- omit .Values.initJob.podSecurityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.initJob.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.initJob.hostAliases "context" $) | nindent 8 }} + {{- end }} + initContainers: + {{- if .Values.initJob.enableDefaultInitContainers }} + {{- include "milvus.waitForProxyInitContainer" . | nindent 8 }} + {{- end }} + containers: + - name: update-credentials + image: {{ template "milvus.init-job.image" . 
}} + imagePullPolicy: {{ .Values.initJob.image.pullPolicy }} + command: + - /bin/bash + - -ec + args: + - | + #!/bin/bash + {{- if .Values.milvus.auth.enabled }} + echo "Updating credentials" + # Taken from https://milvus.io/docs/authenticate.md + python - < + ## @param dataCoord.service.nodePorts.grpc Node port for GRPC + ## @param dataCoord.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## @param dataCoord.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param dataCoord.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param dataCoord.service.clusterIP Data Coordinator service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param dataCoord.service.loadBalancerIP Data Coordinator service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param dataCoord.service.loadBalancerSourceRanges Data Coordinator service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param dataCoord.service.externalTrafficPolicy Data Coordinator service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param dataCoord.service.annotations Additional custom annotations for Data Coordinator service + ## + annotations: {} + ## @param dataCoord.service.extraPorts Extra ports to expose 
in the Data Coordinator service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param dataCoord.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param dataCoord.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports the Data Coordinator is + ## listening on. When true, the Data Coordinator will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param dataCoord.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param dataCoord.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param dataCoord.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param dataCoord.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Data Coordinator Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param dataCoord.metrics.enabled Enable metrics + ## + enabled: false + ## @param dataCoord.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + 
prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.dataCoord.service.ports.grpc }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param dataCoord.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param dataCoord.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: "" + ## @param dataCoord.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param dataCoord.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param dataCoord.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param dataCoord.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param dataCoord.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param dataCoord.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param dataCoord.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param dataCoord.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param dataCoord.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Root Coordinator Deployment Parameters +## +rootCoord: + ## @param rootCoord.enabled Enable Root Coordinator deployment + ## + enabled: true + ## @param rootCoord.extraEnvVars Array with extra environment variables to add to data coordinator nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param rootCoord.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data coordinator nodes + ## + extraEnvVarsCM: "" + ## @param rootCoord.extraEnvVarsSecret Name of existing Secret containing extra env vars for data coordinator nodes + ## + extraEnvVarsSecret: "" + ## @param rootCoord.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + rootCoord: + port: {{ .Values.rootCoord.containerPorts.grpc }} + enableActiveStandby: true + + ## @param rootCoord.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param rootCoord.extraConfig Override configuration + ## + extraConfig: {} + ## @param rootCoord.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for the Dashboard + ## + extraConfigExistingConfigMap: "" + ## @param rootCoord.command Override default container command (useful when using custom images) + ## + command: [] + ## @param rootCoord.args Override default container args (useful when using custom images) + ## + args: [] + ## @param rootCoord.replicaCount Number of Root Coordinator replicas to deploy + ## + replicaCount: 1 + ## @param rootCoord.containerPorts.grpc GRPC port for Root Coordinator + ## @param rootCoord.containerPorts.metrics Metrics port for Root Coordinator + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for Root Coordinator 
containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param rootCoord.livenessProbe.enabled Enable livenessProbe on Root Coordinator nodes + ## @param rootCoord.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param rootCoord.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param rootCoord.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param rootCoord.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param rootCoord.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param rootCoord.readinessProbe.enabled Enable readinessProbe on Root Coordinator nodes + ## @param rootCoord.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param rootCoord.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param rootCoord.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param rootCoord.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param rootCoord.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param rootCoord.startupProbe.enabled Enable startupProbe on Root Coordinator containers + ## @param rootCoord.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param rootCoord.startupProbe.periodSeconds Period seconds for startupProbe + ## @param rootCoord.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param rootCoord.startupProbe.failureThreshold Failure threshold for startupProbe + ## 
@param rootCoord.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param rootCoord.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param rootCoord.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param rootCoord.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## data coordinator resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param rootCoord.resources.limits The resources limits for the data coordinator containers + ## @param rootCoord.resources.requests The requested resources for the data coordinator containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param rootCoord.podSecurityContext.enabled Enabled Root Coordinator pods' Security Context + ## @param rootCoord.podSecurityContext.fsGroup Set Root Coordinator pod's Security Context fsGroup + ## @param rootCoord.podSecurityContext.seccompProfile.type Set Root Coordinator container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param rootCoord.containerSecurityContext.enabled Enabled Root Coordinator containers' Security Context + ## @param rootCoord.containerSecurityContext.runAsUser Set Root Coordinator containers' Security Context runAsUser + ## @param rootCoord.containerSecurityContext.runAsNonRoot 
Set Root Coordinator containers' Security Context runAsNonRoot + ## @param rootCoord.containerSecurityContext.readOnlyRootFilesystem Set Root Coordinator containers' Security Context readOnlyRootFilesystem + ## @param rootCoord.containerSecurityContext.allowPrivilegeEscalation Set Root Coordinator container's privilege escalation + ## @param rootCoord.containerSecurityContext.capabilities.drop Set Root Coordinator container's Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param rootCoord.lifecycleHooks for the Root Coordinator container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param rootCoord.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param rootCoord.hostAliases Root Coordinator pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param rootCoord.podLabels Extra labels for Root Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param rootCoord.podAnnotations Annotations for Root Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param rootCoord.podAffinityPreset Pod affinity preset. Ignored if `rootCoord.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param rootCoord.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `rootCoord.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data coordinator.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param rootCoord.nodeAffinityPreset.type Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param rootCoord.nodeAffinityPreset.key Node label key to match. Ignored if `data coordinator.affinity` is set + ## + key: "" + ## @param rootCoord.nodeAffinityPreset.values Node label values to match. Ignored if `data coordinator.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param rootCoord.affinity Affinity for Root Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `rootCoord.podAffinityPreset`, `rootCoord.podAntiAffinityPreset`, and `rootCoord.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param rootCoord.nodeSelector Node labels for Root Coordinator pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param rootCoord.tolerations Tolerations for Root Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param rootCoord.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param rootCoord.priorityClassName Root Coordinator pods' priorityClassName + ## + priorityClassName: "" + ## @param rootCoord.schedulerName 
Kubernetes pod scheduler registry + ## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param rootCoord.updateStrategy.type Root Coordinator statefulset strategy type + ## @param rootCoord.updateStrategy.rollingUpdate Root Coordinator statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param rootCoord.extraVolumes Optionally specify extra list of additional volumes for the Root Coordinator pod(s) + ## + extraVolumes: [] + ## @param rootCoord.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Root Coordinator container(s) + ## + extraVolumeMounts: [] + ## @param rootCoord.sidecars Add additional sidecar containers to the Root Coordinator pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param rootCoord.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param rootCoord.initContainers Add additional init containers to the Root Coordinator pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Root Coordinator to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param rootCoord.serviceAccount.create Enable creation of ServiceAccount for Root Coordinator pods + ## + create: false + ## @param rootCoord.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a 
name is generated using the common.names.fullname template + ## + name: "" + ## @param rootCoord.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param rootCoord.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param rootCoord.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param rootCoord.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param rootCoord.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Root Coordinator Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param rootCoord.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param rootCoord.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param rootCoord.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param rootCoord.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param rootCoord.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param rootCoord.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param rootCoord.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param rootCoord.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param rootCoord.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param rootCoord.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param rootCoord.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param rootCoord.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Root Coordinator Traffic Exposure Parameters + ## + + ## data coordinator service parameters + ## + service: + ## @param rootCoord.service.type Root Coordinator service type + ## + type: ClusterIP + ## @param rootCoord.service.ports.grpc Root Coordinator GRPC service port + ## @param rootCoord.service.ports.metrics Root Coordinator Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param rootCoord.service.nodePorts.grpc Node port for GRPC + ## @param rootCoord.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## 
@param rootCoord.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param rootCoord.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param rootCoord.service.clusterIP Root Coordinator service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param rootCoord.service.loadBalancerIP Root Coordinator service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param rootCoord.service.loadBalancerSourceRanges Root Coordinator service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param rootCoord.service.externalTrafficPolicy Root Coordinator service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param rootCoord.service.annotations Additional custom annotations for Root Coordinator service + ## + annotations: {} + ## @param rootCoord.service.extraPorts Extra ports to expose in the Root Coordinator service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param rootCoord.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param rootCoord.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client 
label will have network access to the ports the Root Coordinator is + ## listening on. When true, the Root Coordinator will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param rootCoord.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param rootCoord.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param rootCoord.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param rootCoord.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + ## @section Root Coordinator Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param rootCoord.metrics.enabled Enable metrics + ## + enabled: false + ## @param rootCoord.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.rootCoord.service.ports.grpc }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param rootCoord.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param rootCoord.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: 
"" + ## @param rootCoord.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param rootCoord.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param rootCoord.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param rootCoord.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param rootCoord.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param rootCoord.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param rootCoord.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param rootCoord.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param rootCoord.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Query Coordinator Deployment Parameters +## +queryCoord: + ## @param queryCoord.enabled Enable Query Coordinator deployment + ## + enabled: true + ## @param queryCoord.extraEnvVars Array with extra environment variables to add to data coordinator nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param queryCoord.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data coordinator nodes + ## + extraEnvVarsCM: "" + ## @param queryCoord.extraEnvVarsSecret Name of existing Secret containing extra env vars for data coordinator nodes + ## + extraEnvVarsSecret: "" + ## @param queryCoord.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + queryCoord: + port: {{ .Values.queryCoord.containerPorts.grpc }} + enableActiveStandby: true + + ## @param queryCoord.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param queryCoord.extraConfig Override configuration + ## + extraConfig: {} + ## @param queryCoord.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for the Dashboard + ## + extraConfigExistingConfigMap: "" + ## @param queryCoord.command Override default container command (useful when using custom images) + ## + command: [] + ## @param queryCoord.args Override default container args (useful when using custom images) + ## + args: [] + ## @param queryCoord.replicaCount Number of Query Coordinator replicas to deploy + ## + replicaCount: 1 + ## @param queryCoord.containerPorts.grpc GRPC port for Query Coordinator + ## @param queryCoord.containerPorts.metrics Metrics port for Query Coordinator + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for 
Query Coordinator containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param queryCoord.livenessProbe.enabled Enable livenessProbe on Query Coordinator nodes + ## @param queryCoord.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param queryCoord.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param queryCoord.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param queryCoord.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param queryCoord.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryCoord.readinessProbe.enabled Enable readinessProbe on Query Coordinator nodes + ## @param queryCoord.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param queryCoord.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param queryCoord.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param queryCoord.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param queryCoord.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryCoord.startupProbe.enabled Enable startupProbe on Query Coordinator containers + ## @param queryCoord.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param queryCoord.startupProbe.periodSeconds Period seconds for startupProbe + ## @param queryCoord.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param queryCoord.startupProbe.failureThreshold 
Failure threshold for startupProbe + ## @param queryCoord.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryCoord.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param queryCoord.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param queryCoord.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## data coordinator resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param queryCoord.resources.limits The resources limits for the data coordinator containers + ## @param queryCoord.resources.requests The requested resources for the data coordinator containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param queryCoord.podSecurityContext.enabled Enabled Query Coordinator pods' Security Context + ## @param queryCoord.podSecurityContext.fsGroup Set Query Coordinator pod's Security Context fsGroup + ## @param queryCoord.podSecurityContext.seccompProfile.type Set Query Coordinator container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param queryCoord.containerSecurityContext.enabled Enabled Query Coordinator containers' Security Context + ## @param queryCoord.containerSecurityContext.runAsUser Set Query Coordinator containers' Security Context runAsUser + ## 
@param queryCoord.containerSecurityContext.runAsNonRoot Set Query Coordinator containers' Security Context runAsNonRoot + ## @param queryCoord.containerSecurityContext.readOnlyRootFilesystem Set Query Coordinator containers' Security Context readOnlyRootFilesystem + ## @param queryCoord.containerSecurityContext.allowPrivilegeEscalation Set Query Coordinator container's privilege escalation + ## @param queryCoord.containerSecurityContext.capabilities.drop Set Query Coordinator container's Security Context capabilities to be dropped + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param queryCoord.lifecycleHooks for the Query Coordinator container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param queryCoord.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param queryCoord.hostAliases Query Coordinator pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param queryCoord.podLabels Extra labels for Query Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param queryCoord.podAnnotations Annotations for Query Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param queryCoord.podAffinityPreset Pod affinity preset. Ignored if `queryCoord.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param queryCoord.podAntiAffinityPreset Pod anti-affinity preset. 
Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data coordinator.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param queryCoord.nodeAffinityPreset.type Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param queryCoord.nodeAffinityPreset.key Node label key to match. Ignored if `data coordinator.affinity` is set + ## + key: "" + ## @param queryCoord.nodeAffinityPreset.values Node label values to match. Ignored if `data coordinator.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param queryCoord.affinity Affinity for Query Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `queryCoord.podAffinityPreset`, `queryCoord.podAntiAffinityPreset`, and `queryCoord.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param queryCoord.nodeSelector Node labels for Query Coordinator pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param queryCoord.tolerations Tolerations for Query Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param queryCoord.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param queryCoord.priorityClassName Query Coordinator pods' priorityClassName + 
## + priorityClassName: "" + ## @param queryCoord.schedulerName Kubernetes pod scheduler registry + ## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param queryCoord.updateStrategy.type Query Coordinator statefulset strategy type + ## @param queryCoord.updateStrategy.rollingUpdate Query Coordinator statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param queryCoord.extraVolumes Optionally specify extra list of additional volumes for the Query Coordinator pod(s) + ## + extraVolumes: [] + ## @param queryCoord.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Query Coordinator container(s) + ## + extraVolumeMounts: [] + ## @param queryCoord.sidecars Add additional sidecar containers to the Query Coordinator pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param queryCoord.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param queryCoord.initContainers Add additional init containers to the Query Coordinator pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Query Coordinator to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param queryCoord.serviceAccount.create Enable creation of ServiceAccount for Query Coordinator pods + ## + create: false + ## @param 
queryCoord.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param queryCoord.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param queryCoord.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param queryCoord.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param queryCoord.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param queryCoord.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Query Coordinator Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param queryCoord.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param queryCoord.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param queryCoord.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param queryCoord.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param queryCoord.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param queryCoord.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param queryCoord.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param queryCoord.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param queryCoord.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param queryCoord.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param queryCoord.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param queryCoord.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Query Coordinator Traffic Exposure Parameters + ## + + ## data coordinator service parameters + ## + service: + ## @param queryCoord.service.type Query Coordinator service type + ## + type: ClusterIP + ## @param queryCoord.service.ports.grpc Query Coordinator GRPC service port + ## @param queryCoord.service.ports.metrics Query Coordinator Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param queryCoord.service.nodePorts.grpc Node port for GRPC + ## @param queryCoord.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + 
metrics: "" + ## @param queryCoord.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param queryCoord.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param queryCoord.service.clusterIP Query Coordinator service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param queryCoord.service.loadBalancerIP Query Coordinator service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param queryCoord.service.loadBalancerSourceRanges Query Coordinator service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param queryCoord.service.externalTrafficPolicy Query Coordinator service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param queryCoord.service.annotations Additional custom annotations for Query Coordinator service + ## + annotations: {} + ## @param queryCoord.service.extraPorts Extra ports to expose in the Query Coordinator service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param queryCoord.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param queryCoord.networkPolicy.allowExternal The Policy model to apply + ## When set to false, 
only pods with the correct client label will have network access to the ports the Query Coordinator is + ## listening on. When true, the Query Coordinator will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param queryCoord.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param queryCoord.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param queryCoord.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param queryCoord.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Query Coordinator Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param queryCoord.metrics.enabled Enable metrics + ## + enabled: false + ## @param queryCoord.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.queryCoord.service.ports.grpc }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param queryCoord.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param queryCoord.metrics.serviceMonitor.annotations Annotations for the 
ServiceMonitor Resource + ## + annotations: "" + ## @param queryCoord.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param queryCoord.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param queryCoord.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param queryCoord.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param queryCoord.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param queryCoord.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param queryCoord.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param queryCoord.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param queryCoord.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Index Coordinator Deployment Parameters +## +indexCoord: + ## @param indexCoord.enabled Enable Index Coordinator deployment + ## + enabled: true + ## @param indexCoord.extraEnvVars Array with extra environment variables to add to data coordinator nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param indexCoord.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data coordinator nodes + ## + extraEnvVarsCM: "" + ## @param indexCoord.extraEnvVarsSecret Name of existing Secret containing extra env vars for data coordinator nodes + ## + extraEnvVarsSecret: "" + ## @param indexCoord.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + indexCoord: + port: {{ .Values.indexCoord.containerPorts.grpc }} + enableActiveStandby: true + + ## @param indexCoord.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param indexCoord.extraConfig Override configuration + ## + extraConfig: {} + ## @param indexCoord.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for the Dashboard + ## + extraConfigExistingConfigMap: "" + ## @param indexCoord.command Override default container command (useful when using custom images) + ## + command: [] + ## @param indexCoord.args Override default container args (useful when using custom images) + ## + args: [] + ## @param indexCoord.replicaCount Number of Index Coordinator replicas to deploy + ## + replicaCount: 1 + ## @param indexCoord.containerPorts.grpc GRPC port for Index Coordinator + ## @param indexCoord.containerPorts.metrics Metrics port for Index Coordinator + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for 
Index Coordinator containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param indexCoord.livenessProbe.enabled Enable livenessProbe on Index Coordinator nodes + ## @param indexCoord.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param indexCoord.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param indexCoord.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param indexCoord.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param indexCoord.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexCoord.readinessProbe.enabled Enable readinessProbe on Index Coordinator nodes + ## @param indexCoord.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param indexCoord.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param indexCoord.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param indexCoord.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param indexCoord.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexCoord.startupProbe.enabled Enable startupProbe on Index Coordinator containers + ## @param indexCoord.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param indexCoord.startupProbe.periodSeconds Period seconds for startupProbe + ## @param indexCoord.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param indexCoord.startupProbe.failureThreshold 
Failure threshold for startupProbe + ## @param indexCoord.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexCoord.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param indexCoord.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param indexCoord.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## data coordinator resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param indexCoord.resources.limits The resources limits for the data coordinator containers + ## @param indexCoord.resources.requests The requested resources for the data coordinator containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param indexCoord.podSecurityContext.enabled Enabled Index Coordinator pods' Security Context + ## @param indexCoord.podSecurityContext.fsGroup Set Index Coordinator pod's Security Context fsGroup + ## @param indexCoord.podSecurityContext.seccompProfile.type Set Index Coordinator container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param indexCoord.containerSecurityContext.enabled Enabled Index Coordinator containers' Security Context + ## @param indexCoord.containerSecurityContext.runAsUser Set Index Coordinator containers' Security Context runAsUser + ## 
@param indexCoord.containerSecurityContext.runAsNonRoot Set Index Coordinator containers' Security Context runAsNonRoot + ## @param indexCoord.containerSecurityContext.readOnlyRootFilesystem Set Index Coordinator containers' Security Context readOnlyRootFilesystem + ## @param indexCoord.containerSecurityContext.allowPrivilegeEscalation Set Index Coordinator container's privilege escalation + ## @param indexCoord.containerSecurityContext.capabilities.drop Set Index Coordinator container's Security Context capabilities to be dropped + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param indexCoord.lifecycleHooks for the Index Coordinator container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param indexCoord.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param indexCoord.hostAliases Index Coordinator pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param indexCoord.podLabels Extra labels for Index Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param indexCoord.podAnnotations Annotations for Index Coordinator pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param indexCoord.podAffinityPreset Pod affinity preset. Ignored if `indexCoord.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param indexCoord.podAntiAffinityPreset Pod anti-affinity preset. 
Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data coordinator.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param indexCoord.nodeAffinityPreset.type Node affinity preset type. Ignored if `data coordinator.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param indexCoord.nodeAffinityPreset.key Node label key to match. Ignored if `data coordinator.affinity` is set + ## + key: "" + ## @param indexCoord.nodeAffinityPreset.values Node label values to match. Ignored if `data coordinator.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param indexCoord.affinity Affinity for Index Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `indexCoord.podAffinityPreset`, `indexCoord.podAntiAffinityPreset`, and `indexCoord.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param indexCoord.nodeSelector Node labels for Index Coordinator pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param indexCoord.tolerations Tolerations for Index Coordinator pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param indexCoord.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param indexCoord.priorityClassName Index Coordinator pods' priorityClassName + 
## + priorityClassName: "" + ## @param indexCoord.schedulerName Kubernetes pod scheduler registry + ## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param indexCoord.updateStrategy.type Index Coordinator statefulset strategy type + ## @param indexCoord.updateStrategy.rollingUpdate Index Coordinator statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param indexCoord.extraVolumes Optionally specify extra list of additional volumes for the Index Coordinator pod(s) + ## + extraVolumes: [] + ## @param indexCoord.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Index Coordinator container(s) + ## + extraVolumeMounts: [] + ## @param indexCoord.sidecars Add additional sidecar containers to the Index Coordinator pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param indexCoord.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param indexCoord.initContainers Add additional init containers to the Index Coordinator pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Index Coordinator to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param indexCoord.serviceAccount.create Enable creation of ServiceAccount for Index Coordinator pods + ## + create: false + ## @param 
indexCoord.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param indexCoord.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param indexCoord.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param indexCoord.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param indexCoord.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param indexCoord.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Index Coordinator Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param indexCoord.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param indexCoord.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param indexCoord.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. 
Defaults to cpu and memory + ## + controlledResources: [] + ## @param indexCoord.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param indexCoord.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param indexCoord.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". + ## + updateMode: Auto + hpa: + ## @param indexCoord.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param indexCoord.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param indexCoord.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param indexCoord.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param indexCoord.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param indexCoord.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Index Coordinator Traffic Exposure Parameters + ## + + ## data coordinator service parameters + ## + service: + ## @param indexCoord.service.type Index Coordinator service type + ## + type: ClusterIP + ## @param indexCoord.service.ports.grpc Index Coordinator GRPC service port + ## @param indexCoord.service.ports.metrics Index Coordinator Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param indexCoord.service.nodePorts.grpc Node port for GRPC + ## @param indexCoord.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + 
metrics: "" + ## @param indexCoord.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param indexCoord.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param indexCoord.service.clusterIP Index Coordinator service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param indexCoord.service.loadBalancerIP Index Coordinator service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param indexCoord.service.loadBalancerSourceRanges Index Coordinator service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param indexCoord.service.externalTrafficPolicy Index Coordinator service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param indexCoord.service.annotations Additional custom annotations for Index Coordinator service + ## + annotations: {} + ## @param indexCoord.service.extraPorts Extra ports to expose in the Index Coordinator service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param indexCoord.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param indexCoord.networkPolicy.allowExternal The Policy model to apply + ## When set to false, 
only pods with the correct client label will have network access to the ports the Index Coordinator is + ## listening on. When true, the Index Coordinator will accept connections from any source (with the correct destination port). + ## + allowExternal: true + ## @param indexCoord.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param indexCoord.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param indexCoord.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param indexCoord.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Index Coordinator Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param indexCoord.metrics.enabled Enable metrics + ## + enabled: false + ## @param indexCoord.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.indexCoord.service.ports.grpc }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param indexCoord.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param indexCoord.metrics.serviceMonitor.annotations Annotations for the 
ServiceMonitor Resource + ## + annotations: "" + ## @param indexCoord.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param indexCoord.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param indexCoord.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param indexCoord.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param indexCoord.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param indexCoord.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param indexCoord.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param indexCoord.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param indexCoord.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Data Node Deployment Parameters +## +dataNode: + ## @param dataNode.enabled Enable Data Node deployment + ## + enabled: true + ## @param dataNode.extraEnvVars Array with extra environment variables to add to data node nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param dataNode.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for data node nodes + ## + extraEnvVarsCM: "" + ## @param dataNode.extraEnvVarsSecret Name of existing Secret containing extra env vars for data node nodes + ## + extraEnvVarsSecret: "" + ## @param dataNode.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + dataNode: + port: {{ .Values.dataNode.containerPorts.grpc }} + enableDisk: true + + ## @param dataNode.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param dataNode.extraConfig Override configuration + ## + extraConfig: {} + ## @param dataNode.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for the Dashboard + ## + extraConfigExistingConfigMap: "" + ## @param dataNode.command Override default container command (useful when using custom images) + ## + command: [] + ## @param dataNode.args Override default container args (useful when using custom images) + ## + args: [] + ## @param dataNode.replicaCount Number of Data Node replicas to deploy + ## + replicaCount: 1 + ## @param dataNode.containerPorts.grpc GRPC port for Data Node + ## @param dataNode.containerPorts.metrics Metrics port for Data Node + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for Data Node containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param dataNode.livenessProbe.enabled Enable livenessProbe on Data Node nodes + ## @param dataNode.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param dataNode.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param dataNode.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param dataNode.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param dataNode.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param dataNode.readinessProbe.enabled Enable readinessProbe on Data Node nodes + ## @param dataNode.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param dataNode.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param dataNode.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param dataNode.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param dataNode.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param dataNode.startupProbe.enabled Enable startupProbe on Data Node containers + ## @param dataNode.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param dataNode.startupProbe.periodSeconds Period seconds for startupProbe + ## @param dataNode.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param dataNode.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param dataNode.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: 
+ enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param dataNode.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param dataNode.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param dataNode.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## data node resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param dataNode.resources.limits The resources limits for the data node containers + ## @param dataNode.resources.requests The requested resources for the data node containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param dataNode.podSecurityContext.enabled Enabled Data Node pods' Security Context + ## @param dataNode.podSecurityContext.fsGroup Set Data Node pod's Security Context fsGroup + ## @param dataNode.podSecurityContext.seccompProfile.type Set Data Node container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param dataNode.containerSecurityContext.enabled Enabled Data Node containers' Security Context + ## @param dataNode.containerSecurityContext.runAsUser Set Data Node containers' Security Context runAsUser + ## @param dataNode.containerSecurityContext.runAsNonRoot Set Data Node containers' Security Context runAsNonRoot + ## @param dataNode.containerSecurityContext.readOnlyRootFilesystem Set Data Node containers' Security Context 
runAsNonRoot + ## @param dataNode.containerSecurityContext.allowPrivilegeEscalation Set Data Node container's privilege escalation + ## @param dataNode.containerSecurityContext.capabilities.drop Set Data Node container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param dataNode.lifecycleHooks for the data node container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param dataNode.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param dataNode.hostAliases data node pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param dataNode.podLabels Extra labels for data node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param dataNode.podAnnotations Annotations for data node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param dataNode.podAffinityPreset Pod affinity preset. Ignored if `data node.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param dataNode.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `data node.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data node.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param dataNode.nodeAffinityPreset.type Node affinity preset type. Ignored if `data node.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param dataNode.nodeAffinityPreset.key Node label key to match. Ignored if `data node.affinity` is set + ## + key: "" + ## @param dataNode.nodeAffinityPreset.values Node label values to match. Ignored if `data node.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param dataNode.affinity Affinity for Data Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `dataNode.podAffinityPreset`, `dataNode.podAntiAffinityPreset`, and `dataNode.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param dataNode.nodeSelector Node labels for Data Node pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param dataNode.tolerations Tolerations for Data Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param dataNode.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param dataNode.priorityClassName Data Node pods' priorityClassName + ## + priorityClassName: "" + ## @param dataNode.schedulerName Kubernetes pod scheduler registry + ## 
https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param dataNode.updateStrategy.type Data Node statefulset strategy type + ## @param dataNode.updateStrategy.rollingUpdate Data Node statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param dataNode.extraVolumes Optionally specify extra list of additional volumes for the Data Node pod(s) + ## + extraVolumes: [] + ## @param dataNode.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Data Node container(s) + ## + extraVolumeMounts: [] + ## @param dataNode.sidecars Add additional sidecar containers to the Data Node pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param dataNode.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param dataNode.initContainers Add additional init containers to the Data Node pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Data Node to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param dataNode.serviceAccount.create Enable creation of ServiceAccount for Data Node pods + ## + create: false + ## @param dataNode.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param 
dataNode.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param dataNode.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param dataNode.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param dataNode.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param dataNode.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Data Node Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param dataNode.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param dataNode.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param dataNode.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param dataNode.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param dataNode.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param dataNode.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param dataNode.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param dataNode.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param dataNode.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param dataNode.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param dataNode.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param dataNode.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Data Node Traffic Exposure Parameters + ## + + ## data node service parameters + ## + service: + ## @param dataNode.service.type Data Node service type + ## + type: ClusterIP + ## @param dataNode.service.ports.grpc Data Node GRPC service port + ## @param dataNode.service.ports.metrics Data Node Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param dataNode.service.nodePorts.grpc Node port for GRPC + ## @param dataNode.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## @param dataNode.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param dataNode.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param dataNode.service.clusterIP Data Node service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param dataNode.service.loadBalancerIP Data Node service Load Balancer IP + ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param dataNode.service.loadBalancerSourceRanges Data Node service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param dataNode.service.externalTrafficPolicy Data Node service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param dataNode.service.annotations Additional custom annotations for Data Node service + ## + annotations: {} + ## @param dataNode.service.extraPorts Extra ports to expose in the Data Node service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param dataNode.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param dataNode.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports the Data Node is + ## listening on. When true, the Data Node will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param dataNode.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param dataNode.networkPolicy.extraEgress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param dataNode.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param dataNode.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Data Node Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param dataNode.metrics.enabled Enable metrics + ## + enabled: false + ## @param dataNode.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.dataNode.service.ports.grpc }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param dataNode.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param dataNode.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: "" + ## @param dataNode.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param 
dataNode.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param dataNode.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param dataNode.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param dataNode.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param dataNode.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param dataNode.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param dataNode.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param dataNode.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Query Node Deployment Parameters +## +queryNode: + ## @param queryNode.enabled Enable Query Node deployment + ## + enabled: true + ## @param queryNode.extraEnvVars Array with extra environment variables to add to query node nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param queryNode.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for query node nodes + ## + extraEnvVarsCM: "" + ## @param queryNode.extraEnvVarsSecret Name of existing Secret containing extra env vars for query node nodes + ## + extraEnvVarsSecret: "" + ## @param queryNode.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + queryNode: + port: {{ .Values.queryNode.containerPorts.grpc }} + enableDisk: true + + ## @param queryNode.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param queryNode.extraConfig Override configuration + ## + extraConfig: {} + ## @param queryNode.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for Query Node + ## + extraConfigExistingConfigMap: "" + ## @param queryNode.command Override default container command (useful when using custom images) + ## + command: [] + ## @param queryNode.args Override default container args (useful when using custom images) + ## + args: [] + ## @param queryNode.replicaCount Number of Query Node replicas to deploy + ## + replicaCount: 1 + ## @param queryNode.containerPorts.grpc GRPC port for Query Node + ## @param queryNode.containerPorts.metrics Metrics port for Query Node + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for Query Node containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param queryNode.livenessProbe.enabled Enable livenessProbe on Query Node nodes + ## @param queryNode.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param queryNode.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param queryNode.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param queryNode.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param queryNode.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryNode.readinessProbe.enabled Enable readinessProbe on Query Node nodes + ## @param queryNode.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param queryNode.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param queryNode.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param queryNode.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param queryNode.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryNode.startupProbe.enabled Enable startupProbe on Query Node containers + ## @param queryNode.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param queryNode.startupProbe.periodSeconds Period seconds for startupProbe + ## @param queryNode.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param queryNode.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param queryNode.startupProbe.successThreshold Success threshold for startupProbe 
+ ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param queryNode.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param queryNode.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param queryNode.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## data node resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param queryNode.resources.limits The resources limits for the data node containers + ## @param queryNode.resources.requests The requested resources for the data node containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param queryNode.podSecurityContext.enabled Enabled Query Node pods' Security Context + ## @param queryNode.podSecurityContext.fsGroup Set Query Node pod's Security Context fsGroup + ## @param queryNode.podSecurityContext.seccompProfile.type Set Query Node container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param queryNode.containerSecurityContext.enabled Enabled Query Node containers' Security Context + ## @param queryNode.containerSecurityContext.runAsUser Set Query Node containers' Security Context runAsUser + ## @param queryNode.containerSecurityContext.runAsNonRoot Set Query Node containers' Security Context runAsNonRoot + ## @param queryNode.containerSecurityContext.readOnlyRootFilesystem Set 
Query Node containers' Security Context runAsNonRoot + ## @param queryNode.containerSecurityContext.allowPrivilegeEscalation Set Query Node container's privilege escalation + ## @param queryNode.containerSecurityContext.capabilities.drop Set Query Node container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param queryNode.lifecycleHooks for the data node container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param queryNode.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param queryNode.hostAliases data node pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param queryNode.podLabels Extra labels for data node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param queryNode.podAnnotations Annotations for data node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param queryNode.podAffinityPreset Pod affinity preset. Ignored if `data node.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param queryNode.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `data node.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node data node.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param queryNode.nodeAffinityPreset.type Node affinity preset type. Ignored if `data node.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param queryNode.nodeAffinityPreset.key Node label key to match. Ignored if `data node.affinity` is set + ## + key: "" + ## @param queryNode.nodeAffinityPreset.values Node label values to match. Ignored if `data node.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param queryNode.affinity Affinity for Query Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `queryNode.podAffinityPreset`, `queryNode.podAntiAffinityPreset`, and `queryNode.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param queryNode.nodeSelector Node labels for Query Node pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param queryNode.tolerations Tolerations for Query Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param queryNode.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param queryNode.priorityClassName Query Node pods' priorityClassName + ## + priorityClassName: "" + ## @param queryNode.schedulerName Kubernetes pod scheduler registry + ## 
https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param queryNode.updateStrategy.type Query Node deployment strategy type + ## @param queryNode.updateStrategy.rollingUpdate Query Node deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param queryNode.extraVolumes Optionally specify extra list of additional volumes for the Query Node pod(s) + ## + extraVolumes: [] + ## @param queryNode.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Query Node container(s) + ## + extraVolumeMounts: [] + ## @param queryNode.sidecars Add additional sidecar containers to the Query Node pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param queryNode.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param queryNode.initContainers Add additional init containers to the Query Node pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Query Node to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param queryNode.serviceAccount.create Enable creation of ServiceAccount for Query Node pods + ## + create: false + ## @param queryNode.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param 
queryNode.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param queryNode.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param queryNode.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param queryNode.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param queryNode.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Query Node Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param queryNode.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param queryNode.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param queryNode.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param queryNode.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param queryNode.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param queryNode.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param queryNode.autoscaling.hpa.enabled Enable HPA for Query Node + ## + enabled: false + ## @param queryNode.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param queryNode.autoscaling.hpa.minReplicas Minimum number of Query Node replicas + ## + minReplicas: "" + ## @param queryNode.autoscaling.hpa.maxReplicas Maximum number of Query Node replicas + ## + maxReplicas: "" + ## @param queryNode.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param queryNode.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Query Node Traffic Exposure Parameters + ## + + ## Query Node service parameters + ## + service: + ## @param queryNode.service.type Query Node service type + ## + type: ClusterIP + ## @param queryNode.service.ports.grpc Query Node GRPC service port + ## @param queryNode.service.ports.metrics Query Node Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param queryNode.service.nodePorts.grpc Node port for GRPC + ## @param queryNode.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## @param queryNode.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param queryNode.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param queryNode.service.clusterIP Query Node service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param queryNode.service.loadBalancerIP Query Node service Load Balancer IP + ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param queryNode.service.loadBalancerSourceRanges Query Node service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param queryNode.service.externalTrafficPolicy Query Node service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param queryNode.service.annotations Additional custom annotations for Query Node service + ## + annotations: {} + ## @param queryNode.service.extraPorts Extra ports to expose in the Query Node service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param queryNode.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param queryNode.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports Query Node is + ## listening on. When true, Query Node will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param queryNode.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param queryNode.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param queryNode.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param queryNode.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Query Node Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param queryNode.metrics.enabled Enable metrics + ## + enabled: false + ## @param queryNode.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.queryNode.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param queryNode.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param queryNode.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: "" + ## @param queryNode.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param 
queryNode.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param queryNode.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param queryNode.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param queryNode.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param queryNode.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param queryNode.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param queryNode.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param queryNode.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Index Node Deployment Parameters +## +indexNode: + ## @param indexNode.enabled Enable Index Node deployment + ## + enabled: true + ## @param indexNode.extraEnvVars Array with extra environment variables to add to Index Node nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param indexNode.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for Index Node nodes + ## + extraEnvVarsCM: "" + ## @param indexNode.extraEnvVarsSecret Name of existing Secret containing extra env vars for Index Node nodes + ## + extraEnvVarsSecret: "" + ## @param indexNode.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + indexNode: + port: {{ .Values.indexNode.containerPorts.grpc }} + enableDisk: true + + ## @param indexNode.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param indexNode.extraConfig Override configuration + ## + extraConfig: {} + ## @param indexNode.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for Index Node + ## + extraConfigExistingConfigMap: "" + ## @param indexNode.command Override default container command (useful when using custom images) + ## + command: [] + ## @param indexNode.args Override default container args (useful when using custom images) + ## + args: [] + ## @param indexNode.replicaCount Number of Index Node replicas to deploy + ## + replicaCount: 1 + ## @param indexNode.containerPorts.grpc GRPC port for Index Node + ## @param indexNode.containerPorts.metrics Metrics port for Index Node + containerPorts: + grpc: 19530 + metrics: 9091 + ## Configure extra options for Index Node containers' liveness, readiness and startup probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param indexNode.livenessProbe.enabled Enable livenessProbe on Index Node nodes + ## @param indexNode.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param indexNode.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param indexNode.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param indexNode.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param indexNode.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexNode.readinessProbe.enabled Enable readinessProbe on Index Node nodes + ## @param indexNode.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param indexNode.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param indexNode.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param indexNode.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param indexNode.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexNode.startupProbe.enabled Enable startupProbe on Index Node containers + ## @param indexNode.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param indexNode.startupProbe.periodSeconds Period seconds for startupProbe + ## @param indexNode.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param indexNode.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param indexNode.startupProbe.successThreshold Success threshold for startupProbe 
+ ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param indexNode.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param indexNode.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param indexNode.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## Index Node resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param indexNode.resources.limits The resources limits for the Index Node containers + ## @param indexNode.resources.requests The requested resources for the Index Node containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param indexNode.podSecurityContext.enabled Enabled Index Node pods' Security Context + ## @param indexNode.podSecurityContext.fsGroup Set Index Node pod's Security Context fsGroup + ## @param indexNode.podSecurityContext.seccompProfile.type Set Index Node container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param indexNode.containerSecurityContext.enabled Enabled Index Node containers' Security Context + ## @param indexNode.containerSecurityContext.runAsUser Set Index Node containers' Security Context runAsUser + ## @param indexNode.containerSecurityContext.runAsNonRoot Set Index Node containers' Security Context runAsNonRoot + ## @param indexNode.containerSecurityContext.readOnlyRootFilesystem Set 
Index Node containers' Security Context readOnlyRootFilesystem + ## @param indexNode.containerSecurityContext.allowPrivilegeEscalation Set Index Node container's privilege escalation + ## @param indexNode.containerSecurityContext.capabilities.drop Set Index Node container's Security Context capabilities to be dropped + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param indexNode.lifecycleHooks for the Index Node container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param indexNode.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param indexNode.hostAliases Index Node pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param indexNode.podLabels Extra labels for Index Node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param indexNode.podAnnotations Annotations for Index Node pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param indexNode.podAffinityPreset Pod affinity preset. Ignored if `indexNode.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param indexNode.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `indexNode.affinity` is set. 
Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node indexNode.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param indexNode.nodeAffinityPreset.type Node affinity preset type. Ignored if `indexNode.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param indexNode.nodeAffinityPreset.key Node label key to match. Ignored if `indexNode.affinity` is set + ## + key: "" + ## @param indexNode.nodeAffinityPreset.values Node label values to match. Ignored if `indexNode.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param indexNode.affinity Affinity for Index Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `indexNode.podAffinityPreset`, `indexNode.podAntiAffinityPreset`, and `indexNode.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param indexNode.nodeSelector Node labels for Index Node pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param indexNode.tolerations Tolerations for Index Node pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param indexNode.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param indexNode.priorityClassName Index Node pods' priorityClassName + ## + priorityClassName: "" + ## @param indexNode.schedulerName Kubernetes pod scheduler registry + ## 
https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param indexNode.updateStrategy.type Index Node deployment strategy type + ## @param indexNode.updateStrategy.rollingUpdate Index Node deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param indexNode.extraVolumes Optionally specify extra list of additional volumes for the Index Node pod(s) + ## + extraVolumes: [] + ## @param indexNode.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Index Node container(s) + ## + extraVolumeMounts: [] + ## @param indexNode.sidecars Add additional sidecar containers to the Index Node pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param indexNode.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param indexNode.initContainers Add additional init containers to the Index Node pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Index Node to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param indexNode.serviceAccount.create Enable creation of ServiceAccount for Index Node pods + ## + create: false + ## @param indexNode.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param 
indexNode.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param indexNode.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param indexNode.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param indexNode.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param indexNode.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Index Node Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param indexNode.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param indexNode.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param indexNode.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param indexNode.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param indexNode.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param indexNode.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param indexNode.autoscaling.hpa.enabled Enable HPA for Index Node + ## + enabled: false + ## @param indexNode.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param indexNode.autoscaling.hpa.minReplicas Minimum number of Index Node replicas + ## + minReplicas: "" + ## @param indexNode.autoscaling.hpa.maxReplicas Maximum number of Index Node replicas + ## + maxReplicas: "" + ## @param indexNode.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param indexNode.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Index Node Traffic Exposure Parameters + ## + + ## Index Node service parameters + ## + service: + ## @param indexNode.service.type Index Node service type + ## + type: ClusterIP + ## @param indexNode.service.ports.grpc Index Node GRPC service port + ## @param indexNode.service.ports.metrics Index Node Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param indexNode.service.nodePorts.grpc Node port for GRPC + ## @param indexNode.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## @param indexNode.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param indexNode.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param indexNode.service.clusterIP Index Node service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param indexNode.service.loadBalancerIP Index Node service Load Balancer IP + ## ref: 
https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param indexNode.service.loadBalancerSourceRanges Index Node service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param indexNode.service.externalTrafficPolicy Index Node service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param indexNode.service.annotations Additional custom annotations for Index Node service + ## + annotations: {} + ## @param indexNode.service.extraPorts Extra ports to expose in the Index Node service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param indexNode.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param indexNode.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports Index Node is + ## listening on. When true, Index Node will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param indexNode.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param indexNode.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param indexNode.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param indexNode.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Index Node Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param indexNode.metrics.enabled Enable metrics + ## + enabled: false + ## @param indexNode.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.indexNode.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param indexNode.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param indexNode.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: "" + ## @param indexNode.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param 
indexNode.metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param indexNode.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param indexNode.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param indexNode.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param indexNode.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param indexNode.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param indexNode.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param indexNode.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. 
+ ## + jobLabel: "" + +## @section Proxy Deployment Parameters +## +proxy: + ## @param proxy.enabled Enable Proxy deployment + ## + enabled: true + ## @param proxy.extraEnvVars Array with extra environment variables to add to proxy nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param proxy.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for proxy nodes + ## + extraEnvVarsCM: "" + ## @param proxy.extraEnvVarsSecret Name of existing Secret containing extra env vars for proxy nodes + ## + extraEnvVarsSecret: "" + ## @param proxy.defaultConfig [string] Default override configuration from the common set in milvus.defaultConfig + ## + defaultConfig: | + # Override the port for internal binding (the external components will use the service port defined in milvus.defaultConfig) + proxy: + port: {{ .Values.proxy.containerPorts.grpc }} + internalPort: {{ .Values.proxy.containerPorts.grpcInternal }} + + ## @param proxy.existingConfigMap name of a ConfigMap with existing configuration for the default configuration + ## + existingConfigMap: "" + ## @param proxy.extraConfig Override configuration + ## + extraConfig: {} + ## @param proxy.extraConfigExistingConfigMap name of a ConfigMap with existing configuration for the Dashboard + ## + extraConfigExistingConfigMap: "" + ## @param proxy.command Override default container command (useful when using custom images) + ## + command: [] + ## @param proxy.args Override default container args (useful when using custom images) + ## + args: [] + ## @param proxy.replicaCount Number of Proxy replicas to deploy + ## + replicaCount: 1 + ## @param proxy.containerPorts.grpc GRPC port for Proxy + ## @param proxy.containerPorts.grpcInternal GRPC internal port for Proxy + ## @param proxy.containerPorts.metrics Metrics port for Proxy + containerPorts: + grpc: 19530 + grpcInternal: 19529 + metrics: 9091 + ## Configure extra options for Proxy containers' liveness, readiness and 
startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param proxy.livenessProbe.enabled Enable livenessProbe on Proxy nodes + ## @param proxy.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param proxy.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param proxy.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param proxy.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param proxy.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param proxy.readinessProbe.enabled Enable readinessProbe on Proxy nodes + ## @param proxy.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param proxy.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param proxy.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param proxy.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param proxy.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param proxy.startupProbe.enabled Enable startupProbe on Proxy containers + ## @param proxy.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param proxy.startupProbe.periodSeconds Period seconds for startupProbe + ## @param proxy.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param proxy.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param proxy.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 
5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param proxy.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param proxy.customReadinessProbe Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param proxy.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## proxy resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param proxy.resources.limits The resources limits for the proxy containers + ## @param proxy.resources.requests The requested resources for the proxy containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param proxy.podSecurityContext.enabled Enabled Proxy pods' Security Context + ## @param proxy.podSecurityContext.fsGroup Set Proxy pod's Security Context fsGroup + ## @param proxy.podSecurityContext.seccompProfile.type Set Proxy container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param proxy.containerSecurityContext.enabled Enabled Proxy containers' Security Context + ## @param proxy.containerSecurityContext.runAsUser Set Proxy containers' Security Context runAsUser + ## @param proxy.containerSecurityContext.runAsNonRoot Set Proxy containers' Security Context runAsNonRoot + ## @param proxy.containerSecurityContext.readOnlyRootFilesystem Set Proxy containers' Security Context readOnlyRootFilesystem + ## @param proxy.containerSecurityContext.allowPrivilegeEscalation Set Proxy container's privilege 
escalation + ## @param proxy.containerSecurityContext.capabilities.drop Set Proxy container's Security Context capabilities to be dropped + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + ## @param proxy.lifecycleHooks for the proxy container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param proxy.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param proxy.hostAliases proxy pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param proxy.podLabels Extra labels for proxy pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param proxy.podAnnotations Annotations for proxy pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param proxy.podAffinityPreset Pod affinity preset. Ignored if `proxy.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param proxy.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `proxy.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node proxy.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param proxy.nodeAffinityPreset.type Node affinity preset type. Ignored if `proxy.affinity` is set. 
Allowed values: `soft` or `hard` + ## + type: "" + ## @param proxy.nodeAffinityPreset.key Node label key to match. Ignored if `proxy.affinity` is set + ## + key: "" + ## @param proxy.nodeAffinityPreset.values Node label values to match. Ignored if `proxy.affinity` is set + ## E.g. + ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param proxy.affinity Affinity for Proxy pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `proxy.podAffinityPreset`, `proxy.podAntiAffinityPreset`, and `proxy.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param proxy.nodeSelector Node labels for Proxy pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param proxy.tolerations Tolerations for Proxy pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param proxy.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param proxy.priorityClassName Proxy pods' priorityClassName + ## + priorityClassName: "" + ## @param proxy.schedulerName Kubernetes pod scheduler registry + ## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param proxy.updateStrategy.type Proxy deployment strategy type + ## @param proxy.updateStrategy.rollingUpdate Proxy deployment rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param proxy.extraVolumes Optionally specify extra list of additional volumes for the Proxy 
pod(s) + ## + extraVolumes: [] + ## @param proxy.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Proxy container(s) + ## + extraVolumeMounts: [] + ## @param proxy.sidecars Add additional sidecar containers to the Proxy pod(s) + ## e.g: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param proxy.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param proxy.initContainers Add additional init containers to the Proxy pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Proxy to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param proxy.serviceAccount.create Enable creation of ServiceAccount for Proxy pods + ## + create: false + ## @param proxy.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param proxy.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param proxy.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param proxy.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param proxy.pdb.minAvailable Minimum 
number/percentage of pods that should remain scheduled + ## @param proxy.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Proxy Autoscaling configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param proxy.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param proxy.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param proxy.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param proxy.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param proxy.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param proxy.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param proxy.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param proxy.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param proxy.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param proxy.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param proxy.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param proxy.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Proxy Traffic Exposure Parameters + ## + + ## proxy service parameters + ## + service: + ## @param proxy.service.type Proxy service type + ## + type: LoadBalancer + ## @param proxy.service.ports.grpc Proxy GRPC service port + ## @param proxy.service.ports.metrics Proxy Metrics service port + ## + ports: + grpc: 19530 + metrics: 9091 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param proxy.service.nodePorts.grpc Node port for GRPC + ## @param proxy.service.nodePorts.metrics Node port for Metrics + ## + nodePorts: + grpc: "" + metrics: "" + ## @param proxy.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param proxy.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param proxy.service.clusterIP Proxy service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param proxy.service.loadBalancerIP Proxy service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param 
proxy.service.loadBalancerSourceRanges Proxy service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param proxy.service.externalTrafficPolicy Proxy service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param proxy.service.annotations Additional custom annotations for Proxy service + ## + annotations: {} + ## @param proxy.service.extraPorts Extra ports to expose in the Proxy service + ## + extraPorts: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param proxy.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param proxy.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports the Proxy is + ## listening on. When true, the Proxy will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param proxy.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param proxy.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param proxy.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param proxy.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + + + ## @section Proxy Metrics Parameters + ## Prometheus Exporter / Metrics + ## + metrics: + ## @param proxy.metrics.enabled Enable metrics + ## + enabled: false + ## @param proxy.metrics.annotations [object] Annotations for the server service in order to scrape metrics + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.proxy.service.ports.metrics }}" + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param proxy.metrics.serviceMonitor.enabled Create ServiceMonitor Resource for scraping metrics using Prometheus Operator + ## + enabled: false + ## @param proxy.metrics.serviceMonitor.annotations Annotations for the ServiceMonitor Resource + ## + annotations: "" + ## @param proxy.metrics.serviceMonitor.namespace Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: "" + ## @param proxy.metrics.serviceMonitor.interval Interval at 
which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + interval: "" + ## @param proxy.metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + scrapeTimeout: "" + ## @param proxy.metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param proxy.metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## + selector: {} + ## @param proxy.metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## + relabelings: [] + ## @param proxy.metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## + metricRelabelings: [] + ## @param proxy.metrics.serviceMonitor.honorLabels Specify honorLabels parameter to add the scrape endpoint + ## + honorLabels: false + ## @param proxy.metrics.serviceMonitor.jobLabel The name of the label on the target service to use as the job name in prometheus. + ## + jobLabel: "" + +## @section Attu Deployment Parameters +## +attu: + ## @param attu.enabled Enable Attu deployment + ## + enabled: true + ## Bitnami Attu image + ## ref: https://hub.docker.com/r/bitnami/attu/tags/ + ## @param attu.image.registry Attu image registry + ## @param attu.image.repository Attu image repository + ## @param attu.image.tag Attu image tag (immutable tags are recommended) + ## @param attu.image.digest Attu image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag + ## @param attu.image.pullPolicy Attu image pull policy + ## @param attu.image.pullSecrets Attu image pull secrets + ## @param attu.image.debug Enable debug mode + ## + image: + registry: docker.io + repository: bitnami/attu + tag: 2.2.6-debian-11-r1 + digest: "" + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: https://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Enable debug mode + ## + debug: false + ## @param attu.extraEnvVars Array with extra environment variables to add to attu nodes + ## e.g: + ## extraEnvVars: + ## - name: FOO + ## value: "bar" + ## + extraEnvVars: [] + ## @param attu.extraEnvVarsCM Name of existing ConfigMap containing extra env vars for attu nodes + ## + extraEnvVarsCM: "" + ## @param attu.extraEnvVarsSecret Name of existing Secret containing extra env vars for attu nodes + ## + extraEnvVarsSecret: "" + ## @param attu.command Override default container command (useful when using custom images) + ## + command: [] + ## @param attu.args Override default container args (useful when using custom images) + ## + args: [] + ## @param attu.replicaCount Number of Attu replicas to deploy + ## + replicaCount: 1 + ## @param attu.containerPorts.http HTTP port for Attu + containerPorts: + http: 3000 + ## Configure extra options for Attu containers' liveness, readiness and startup probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/#configure-probes + ## @param attu.livenessProbe.enabled Enable livenessProbe on Attu nodes + ## 
@param attu.livenessProbe.initialDelaySeconds Initial delay seconds for livenessProbe + ## @param attu.livenessProbe.periodSeconds Period seconds for livenessProbe + ## @param attu.livenessProbe.timeoutSeconds Timeout seconds for livenessProbe + ## @param attu.livenessProbe.failureThreshold Failure threshold for livenessProbe + ## @param attu.livenessProbe.successThreshold Success threshold for livenessProbe + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param attu.readinessProbe.enabled Enable readinessProbe on Attu nodes + ## @param attu.readinessProbe.initialDelaySeconds Initial delay seconds for readinessProbe + ## @param attu.readinessProbe.periodSeconds Period seconds for readinessProbe + ## @param attu.readinessProbe.timeoutSeconds Timeout seconds for readinessProbe + ## @param attu.readinessProbe.failureThreshold Failure threshold for readinessProbe + ## @param attu.readinessProbe.successThreshold Success threshold for readinessProbe + ## + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param attu.startupProbe.enabled Enable startupProbe on Attu containers + ## @param attu.startupProbe.initialDelaySeconds Initial delay seconds for startupProbe + ## @param attu.startupProbe.periodSeconds Period seconds for startupProbe + ## @param attu.startupProbe.timeoutSeconds Timeout seconds for startupProbe + ## @param attu.startupProbe.failureThreshold Failure threshold for startupProbe + ## @param attu.startupProbe.successThreshold Success threshold for startupProbe + ## + startupProbe: + enabled: false + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 5 + successThreshold: 1 + ## @param attu.customLivenessProbe Custom livenessProbe that overrides the default one + ## + customLivenessProbe: {} + ## @param attu.customReadinessProbe 
Custom readinessProbe that overrides the default one + ## + customReadinessProbe: {} + ## @param attu.customStartupProbe Custom startupProbe that overrides the default one + ## + customStartupProbe: {} + ## attu resource requests and limits + ## ref: https://kubernetes.io/docs/user-guide/compute-resources/ + ## @param attu.resources.limits The resources limits for the attu containers + ## @param attu.resources.requests The requested resources for the attu containers + ## + resources: + limits: {} + requests: {} + ## Configure Pods Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param attu.podSecurityContext.enabled Enabled Attu pods' Security Context + ## @param attu.podSecurityContext.fsGroup Set Attu pod's Security Context fsGroup + ## @param attu.podSecurityContext.seccompProfile.type Set Attu container's Security Context seccomp profile + ## + podSecurityContext: + enabled: true + fsGroup: 1001 + seccompProfile: + type: "RuntimeDefault" + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param attu.containerSecurityContext.enabled Enabled Attu containers' Security Context + ## @param attu.containerSecurityContext.runAsUser Set Attu containers' Security Context runAsUser + ## @param attu.containerSecurityContext.runAsNonRoot Set Attu containers' Security Context runAsNonRoot + ## @param attu.containerSecurityContext.readOnlyRootFilesystem Set Attu containers' Security Context runAsNonRoot + ## @param attu.containerSecurityContext.allowPrivilegeEscalation Set Attu container's privilege escalation + ## @param attu.containerSecurityContext.capabilities.drop Set Attu container's Security Context runAsNonRoot + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false 
+ capabilities: + drop: ["ALL"] + ## @param attu.lifecycleHooks for the attu container(s) to automate configuration before or after startup + ## + lifecycleHooks: {} + ## @param attu.runtimeClassName Name of the runtime class to be used by pod(s) + ## ref: https://kubernetes.io/docs/concepts/containers/runtime-class/ + ## + runtimeClassName: "" + ## @param attu.hostAliases attu pods host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param attu.podLabels Extra labels for attu pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param attu.podAnnotations Annotations for attu pods + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + podAnnotations: {} + ## @param attu.podAffinityPreset Pod affinity preset. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param attu.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## Node attu.affinity preset + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## + nodeAffinityPreset: + ## @param attu.nodeAffinityPreset.type Node affinity preset type. Ignored if `attu.affinity` is set. Allowed values: `soft` or `hard` + ## + type: "" + ## @param attu.nodeAffinityPreset.key Node label key to match. Ignored if `attu.affinity` is set + ## + key: "" + ## @param attu.nodeAffinityPreset.values Node label values to match. Ignored if `attu.affinity` is set + ## E.g. 
+ ## values: + ## - e2e-az1 + ## - e2e-az2 + ## + values: [] + ## @param attu.affinity Affinity for Attu pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## NOTE: `attu.podAffinityPreset`, `attu.podAntiAffinityPreset`, and `attu.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} + ## @param attu.nodeSelector Node labels for Attu pods assignment + ## ref: https://kubernetes.io/docs/user-guide/node-selection/ + ## + nodeSelector: {} + ## @param attu.tolerations Tolerations for Attu pods assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ + ## + tolerations: [] + ## @param attu.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods + ## + topologySpreadConstraints: [] + ## @param attu.priorityClassName Attu pods' priorityClassName + ## + priorityClassName: "" + ## @param attu.schedulerName Kubernetes pod scheduler registry + ## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param attu.updateStrategy.type Attu statefulset strategy type + ## @param attu.updateStrategy.rollingUpdate Attu statefulset rolling update configuration parameters + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies + ## + updateStrategy: + type: RollingUpdate + rollingUpdate: {} + ## @param attu.extraVolumes Optionally specify extra list of additional volumes for the Attu pod(s) + ## + extraVolumes: [] + ## @param attu.extraVolumeMounts Optionally specify extra list of additional volumeMounts for the Attu container(s) + ## + extraVolumeMounts: [] + ## @param attu.sidecars Add additional sidecar containers to the Attu pod(s) + ## e.g: + ## sidecars: + ## - name: 
your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + ## @param attu.enableDefaultInitContainers Deploy default init containers + ## + enableDefaultInitContainers: true + ## @param attu.initContainers Add additional init containers to the Attu pod(s) + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/init-containers/ + ## e.g: + ## initContainers: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## command: ['sh', '-c', 'echo "hello world"'] + ## + initContainers: [] + ## Service account for Attu to use + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## + serviceAccount: + ## @param attu.serviceAccount.create Enable creation of ServiceAccount for Attu pods + ## + create: false + ## @param attu.serviceAccount.name The name of the ServiceAccount to use + ## If not set and create is true, a name is generated using the common.names.fullname template + ## + name: "" + ## @param attu.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## Can be set to false if pods using this serviceAccount do not need to use K8s API + ## + automountServiceAccountToken: false + ## @param attu.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + annotations: {} + + ## Pod Disruption Budget configuration + ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb + ## @param attu.pdb.create Enable/disable a Pod Disruption Budget creation + ## @param attu.pdb.minAvailable Minimum number/percentage of pods that should remain scheduled + ## @param attu.pdb.maxUnavailable Maximum number/percentage of pods that may be made unavailable + ## + pdb: + create: false + minAvailable: 1 + maxUnavailable: "" + + ## @section Attu Autoscaling configuration + ## ref: 
https://kubernetes.io/docs/tasks/run-application/horizontal-pod-autoscale/ + ## + autoscaling: + vpa: + ## @param attu.autoscaling.vpa.enabled Enable VPA + ## + enabled: false + ## @param attu.autoscaling.vpa.annotations Annotations for VPA resource + ## + annotations: {} + ## @param attu.autoscaling.vpa.controlledResources VPA List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory + ## + controlledResources: [] + ## @param attu.autoscaling.vpa.maxAllowed VPA Max allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + maxAllowed: {} + ## @param attu.autoscaling.vpa.minAllowed VPA Min allowed resources for the pod + ## cpu: 200m + ## memory: 100Mi + minAllowed: {} + updatePolicy: + ## @param attu.autoscaling.vpa.updatePolicy.updateMode Autoscaling update policy Specifies whether recommended updates are applied when a Pod is started and whether recommended updates are applied during the life of a Pod + ## Possible values are "Off", "Initial", "Recreate", and "Auto". 
+ ## + updateMode: Auto + hpa: + ## @param attu.autoscaling.hpa.enabled Enable HPA for Milvus Data Plane + ## + enabled: false + ## @param attu.autoscaling.hpa.annotations Annotations for HPA resource + ## + annotations: {} + ## @param attu.autoscaling.hpa.minReplicas Minimum number of Milvus Data Plane replicas + ## + minReplicas: "" + ## @param attu.autoscaling.hpa.maxReplicas Maximum number of Milvus Data Plane replicas + ## + maxReplicas: "" + ## @param attu.autoscaling.hpa.targetCPU Target CPU utilization percentage + ## + targetCPU: "" + ## @param attu.autoscaling.hpa.targetMemory Target Memory utilization percentage + ## + targetMemory: "" + + ## @section Attu Traffic Exposure Parameters + ## + + ## attu service parameters + ## + service: + ## @param attu.service.type Attu service type + ## + type: LoadBalancer + ## @param attu.service.ports.http Attu HTTP service port + ## + ports: + http: 80 + ## Node ports to expose + ## NOTE: choose port between <30000-32767> + ## @param attu.service.nodePorts.http Node port for HTTP + ## + nodePorts: + http: "" + ## @param attu.service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param attu.service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/user-guide/services/ + ## + sessionAffinity: None + ## @param attu.service.clusterIP Attu service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param attu.service.loadBalancerIP Attu service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-loadbalancer + ## + loadBalancerIP: "" + ## @param attu.service.loadBalancerSourceRanges Attu service Load Balancer sources + ## ref: 
https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param attu.service.externalTrafficPolicy Attu service external traffic policy + ## ref https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param attu.service.annotations Additional custom annotations for Attu service + ## + annotations: {} + ## @param attu.service.extraPorts Extra ports to expose in the Attu service + ## + extraPorts: [] + + ## ref: http://kubernetes.io/docs/user-guide/ingress/ + ## + ingress: + ## @param attu.ingress.enabled Enable ingress record generation for Milvus + ## + enabled: false + ## @param attu.ingress.pathType Ingress path type + ## + pathType: ImplementationSpecific + ## @param attu.ingress.apiVersion Force Ingress API version (automatically detected if not set) + ## + apiVersion: "" + ## @param attu.ingress.hostname Default host for the ingress record + ## + hostname: milvus.local + ## @param attu.ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster. + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ + ## + ingressClassName: "" + ## @param attu.ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers + ## + path: / + ## @param attu.ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
+ ## Use this parameter to set the required annotations for cert-manager, see + ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations + ## e.g: + ## annotations: + ## kubernetes.io/ingress.class: nginx + ## cert-manager.io/cluster-issuer: cluster-issuer-name + ## + annotations: {} + ## @param attu.ingress.tls Enable TLS configuration for the host defined at `attu.ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `attu.ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `attu.ingress.selfSigned=true` + ## + tls: false + ## @param attu.ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param attu.ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: + ## extraHosts: + ## - name: milvus.local + ## path: / + ## + extraHosts: [] + ## @param attu.ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: + ## extraPaths: + ## - path: /* + ## backend: + ## serviceName: ssl-redirect + ## servicePort: use-annotation + ## + extraPaths: [] + ## @param attu.ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: + ## extraTls: + ## - hosts: + ## - milvus.local + ## secretName: milvus.local-tls + ## + extraTls: [] + ## @param attu.ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using 
cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + ## e.g: + ## secrets: + ## - name: milvus.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... + ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- + ## + secrets: [] + ## @param attu.ingress.extraRules Additional rules to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules + ## e.g: + ## extraRules: + ## - host: example.local + ## http: + ## path: / + ## backend: + ## service: + ## name: example-svc + ## port: + ## name: http + ## + extraRules: [] + + ## Network Policy configuration + ## ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ + ## + networkPolicy: + ## @param attu.networkPolicy.enabled Enable creation of NetworkPolicy resources + ## + enabled: false + ## @param attu.networkPolicy.allowExternal The Policy model to apply + ## When set to false, only pods with the correct client label will have network access to the ports Attu is + ## listening on. When true, Attu will accept connections from any source (with the correct destination port). 
+ ## + allowExternal: true + ## @param attu.networkPolicy.extraIngress [array] Add extra ingress rules to the NetworkPolicy + ## e.g: + ## extraIngress: + ## - ports: + ## - port: 1234 + ## from: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraIngress: [] + ## @param attu.networkPolicy.extraEgress [array] Add extra egress rules to the NetworkPolicy + ## e.g: + ## extraEgress: + ## - ports: + ## - port: 1234 + ## to: + ## - podSelector: + ## - matchLabels: + ## - role: frontend + ## - podSelector: + ## - matchExpressions: + ## - key: role + ## operator: In + ## values: + ## - frontend + ## + extraEgress: [] + ## @param attu.networkPolicy.ingressNSMatchLabels [object] Labels to match to allow traffic from other namespaces + ## @param attu.networkPolicy.ingressNSPodMatchLabels [object] Pod labels to match to allow traffic from other namespaces + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +## @section Init Container Parameters +## + +waitContainer: + ## @param waitContainer.image.registry Init container wait-container image registry + ## @param waitContainer.image.repository Init container wait-container image name + ## @param waitContainer.image.tag Init container wait-container image tag + ## @param waitContainer.image.digest Init container wait-container image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: 11-debian-11-r127 + digest: "" + ## @param waitContainer.image.pullPolicy Init container wait-container image pull policy + ## + pullPolicy: IfNotPresent + ## @param waitContainer.image.pullSecrets [array] Specify docker-registry secret names as an array + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Configure Container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param waitContainer.containerSecurityContext.enabled Enabled Milvus containers' Security Context + ## @param waitContainer.containerSecurityContext.runAsUser Set Milvus containers' Security Context runAsUser + ## @param waitContainer.containerSecurityContext.runAsNonRoot Set Milvus containers' Security Context runAsNonRoot + ## @param waitContainer.containerSecurityContext.readOnlyRootFilesystem Set Milvus containers' Security Context readOnlyRootFilesystem + ## @param waitContainer.containerSecurityContext.allowPrivilegeEscalation Set Milvus container's privilege escalation + ## @param waitContainer.containerSecurityContext.capabilities.drop Set Milvus container's Security Context capabilities to drop + ## + containerSecurityContext: + enabled: true + runAsUser: 1001 + runAsNonRoot: true + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + +## @section External etcd parameters +## +externalEtcd: + ## @param externalEtcd.servers List of hostnames of the external etcd + ## + servers: [] + ## @param externalEtcd.port Port of the external etcd instance + ## + port: 2379 + ## @param externalEtcd.secureTransport Use TLS for client-to-server communications + ## + secureTransport: false + +## @section External S3 parameters +## All of these values are only used when minio.enabled is set to false +## @param externalS3.host External S3 host +## @param externalS3.port External S3 port number +## @param externalS3.accessKeyID External S3 access key ID +## @param externalS3.accessKeySecret External S3 access key secret +## @param externalS3.existingSecret Name of an existing secret resource containing 
the S3 credentials +## @param externalS3.existingSecretAccessKeyIDKey Name of an existing secret key containing the S3 access key ID +## @param externalS3.existingSecretKeySecretKey Name of an existing secret key containing the S3 access key secret +## @param externalS3.protocol External S3 protocol +## @param externalS3.bucket External S3 bucket +## @param externalS3.rootPath External S3 root path +## @param externalS3.iamEndpoint External S3 IAM endpoint +## @param externalS3.cloudProvider External S3 cloud provider +## +externalS3: + host: "" + port: 443 + accessKeyID: "" + accessKeySecret: "" + existingSecret: "" + existingSecretAccessKeyIDKey: "root-user" + existingSecretKeySecretKey: "root-password" + protocol: "https" + bucket: "milvus" + rootPath: "file" + iamEndpoint: "" + cloudProvider: "" + +## @section External Kafka parameters +## All of these values are ignored when kafka.enabled is set to true +## +externalKafka: + ## @param externalKafka.servers External Kafka brokers + ## @param externalKafka.port External Kafka port + ## Multiple brokers can be provided in a comma separated list, e.g. 
host1:port1,host2:port2 + ## + servers: + - localhost + ## + ## + port: 9092 + +## @section etcd sub-chart parameters +## +etcd: + ## @param etcd.enabled Deploy etcd sub-chart + ## + enabled: true + ## @param etcd.replicaCount Number of etcd replicas + ## + replicaCount: 3 + ## @param etcd.containerPorts.client Container port for etcd + ## + containerPorts: + client: 2379 + ## @param etcd.auth.rbac.create Switch to enable RBAC authentication + ## @param etcd.auth.client.secureTransport use TLS for client-to-server communications + ## + auth: + rbac: + # Milvus does not have support for etcd authentication + # https://github.com/milvus-io/milvus/blob/master/pkg/util/paramtable/service_param.go#L93 + create: false + client: + secureTransport: false + +## @section MinIO® chart parameters +## @extra minio For full list of MinIO® values configurations please refer [here](https://github.com/bitnami/charts/tree/main/bitnami/minio) +## +minio: + ## @param minio.enabled Enable/disable MinIO® chart installation + ## to be used as the object storage for Milvus + ## + enabled: true + ## MinIO® authentication parameters + ## + auth: + ## @param minio.auth.rootUser MinIO® root username + ## + rootUser: admin + ## @param minio.auth.rootPassword Password for MinIO® root user + ## + rootPassword: "" + ## @param minio.auth.existingSecret Name of an existing secret containing the MinIO® credentials + ## + existingSecret: "" + ## @param minio.defaultBuckets Comma, semi-colon or space separated list of MinIO® buckets to create + ## + defaultBuckets: "milvus" + + ## @param minio.provisioning.enabled Enable/disable MinIO® provisioning job + ## @param minio.provisioning.extraCommands Extra commands to run on MinIO® provisioning job + ## + provisioning: + enabled: true + # We need to allow downloads in order for the UI to work + extraCommands: ["mc anonymous set download provisioning/milvus"] + + ## @param minio.tls.enabled Enable/disable MinIO® TLS support + ## + tls: + enabled: false + ## 
@param minio.service.type MinIO® service type + ## @param minio.service.loadBalancerIP MinIO® service LoadBalancer IP + ## @param minio.service.ports.api MinIO® service port + ## + service: + type: ClusterIP + loadBalancerIP: "" + ports: + api: 80 + +## @section kafka sub-chart parameters +## https://github.com/bitnami/charts/blob/main/bitnami/kafka/values.yaml +## +kafka: + ## @param kafka.enabled Enable/disable Kafka chart installation + ## + enabled: true + ## @param kafka.replicaCount Number of Kafka brokers + ## + replicaCount: 1 + service: + ## @param kafka.service.ports.client Kafka svc port for client connections + ## + ports: + client: 9092 + auth: + ## @param kafka.auth.clientProtocol Kafka authentication protocol for the client + ## + clientProtocol: sasl + sasl: + ## @param kafka.auth.sasl.mechanisms Kafka authentication mechanisms for SASL + ## + mechanisms: plain + jaas: + ## @param kafka.auth.sasl.jaas.clientUsers Kafka client users + ## + clientUsers: + - user