From 78a8a8da38974586285f81d2c66fd27dc4d0c62f Mon Sep 17 00:00:00 2001 From: Juan Ariza Toledano Date: Thu, 22 May 2025 10:23:10 +0200 Subject: [PATCH] [bitnami/influxdb] feat: InfluxDB 3 major version (#33743) --- .vib/influxdb/cypress/cypress.config.js | 13 - .../cypress/cypress/e2e/influxdb.cy.js | 55 - .../cypress/cypress/fixtures/buckets.json | 5 - .../fixtures/dashboards/health_tracker.json | 63 - .../fixtures/sample_data/glucose_levels.txt | 4 - .../cypress/cypress/support/commands.js | 39 - .vib/influxdb/cypress/cypress/support/e2e.js | 25 - .../influxdb/cypress/cypress/support/utils.js | 15 - .vib/influxdb/ginkgo/go.mod | 51 +- .vib/influxdb/ginkgo/go.sum | 119 +- .vib/influxdb/ginkgo/influxdb_suite_test.go | 33 +- .vib/influxdb/ginkgo/influxdb_test.go | 49 +- .vib/influxdb/goss/goss.yaml | 37 +- .vib/influxdb/runtime-parameters.yaml | 178 +- .vib/influxdb/vib-verify.json | 20 +- bitnami/influxdb/CHANGELOG.md | 12 +- bitnami/influxdb/Chart.lock | 6 +- bitnami/influxdb/Chart.yaml | 19 +- bitnami/influxdb/README.md | 740 ++++--- bitnami/influxdb/files/conf/README.md | 7 - .../docker-entrypoint-initdb.d/README.md | 5 - bitnami/influxdb/templates/NOTES.txt | 154 +- bitnami/influxdb/templates/_helpers.tpl | 196 +- .../influxdb/templates/_init_containers.tpl | 37 + bitnami/influxdb/templates/certs.yaml | 87 + .../influxdb/templates/configmap-backup.yaml | 72 - .../templates/configmap-initdb-scripts.yaml | 11 +- bitnami/influxdb/templates/configmap.yaml | 21 - .../templates/create-admin-token-job.yaml | 223 ++ .../create-admin-token-serviceaccount.yaml | 19 + .../influxdb/templates/cronjob-backup.yaml | 311 --- .../templates/delete-admin-token-job.yaml | 84 + .../delete-admin-token-serviceaccount.yaml | 19 + bitnami/influxdb/templates/deployment.yaml | 447 ++-- bitnami/influxdb/templates/hpa.yaml | 42 + .../templates/ingress-tls-secret.yaml | 48 + bitnami/influxdb/templates/ingress.yaml | 29 +- bitnami/influxdb/templates/networkpolicy.yaml | 29 +- 
bitnami/influxdb/templates/pdb.yaml | 20 +- bitnami/influxdb/templates/psp-role.yaml | 22 - .../influxdb/templates/psp-rolebinding.yaml | 24 - bitnami/influxdb/templates/psp.yaml | 44 - bitnami/influxdb/templates/pvc-backup.yaml | 28 - bitnami/influxdb/templates/pvc.yaml | 16 +- bitnami/influxdb/templates/rbac.yaml | 78 + .../influxdb/templates/secrets-backup.yaml | 54 - bitnami/influxdb/templates/secrets.yaml | 29 - .../influxdb/templates/service-collectd.yaml | 54 - .../influxdb/templates/service-metrics.yaml | 54 - bitnami/influxdb/templates/service.yaml | 68 +- .../influxdb/templates/serviceaccount.yaml | 11 +- .../influxdb/templates/servicemonitor.yaml | 20 +- bitnami/influxdb/templates/store-secret.yaml | 28 + bitnami/influxdb/templates/tls-secret.yaml | 75 + bitnami/influxdb/templates/vpa.yaml | 45 + bitnami/influxdb/values.yaml | 1792 ++++++++--------- 56 files changed, 2812 insertions(+), 2974 deletions(-) delete mode 100644 .vib/influxdb/cypress/cypress.config.js delete mode 100644 .vib/influxdb/cypress/cypress/e2e/influxdb.cy.js delete mode 100644 .vib/influxdb/cypress/cypress/fixtures/buckets.json delete mode 100644 .vib/influxdb/cypress/cypress/fixtures/dashboards/health_tracker.json delete mode 100644 .vib/influxdb/cypress/cypress/fixtures/sample_data/glucose_levels.txt delete mode 100644 .vib/influxdb/cypress/cypress/support/commands.js delete mode 100644 .vib/influxdb/cypress/cypress/support/e2e.js delete mode 100644 .vib/influxdb/cypress/cypress/support/utils.js delete mode 100644 bitnami/influxdb/files/conf/README.md delete mode 100644 bitnami/influxdb/files/docker-entrypoint-initdb.d/README.md create mode 100644 bitnami/influxdb/templates/_init_containers.tpl create mode 100644 bitnami/influxdb/templates/certs.yaml delete mode 100644 bitnami/influxdb/templates/configmap-backup.yaml delete mode 100644 bitnami/influxdb/templates/configmap.yaml create mode 100644 bitnami/influxdb/templates/create-admin-token-job.yaml create mode 100644 
bitnami/influxdb/templates/create-admin-token-serviceaccount.yaml delete mode 100644 bitnami/influxdb/templates/cronjob-backup.yaml create mode 100644 bitnami/influxdb/templates/delete-admin-token-job.yaml create mode 100644 bitnami/influxdb/templates/delete-admin-token-serviceaccount.yaml create mode 100644 bitnami/influxdb/templates/hpa.yaml create mode 100644 bitnami/influxdb/templates/ingress-tls-secret.yaml delete mode 100644 bitnami/influxdb/templates/psp-role.yaml delete mode 100644 bitnami/influxdb/templates/psp-rolebinding.yaml delete mode 100644 bitnami/influxdb/templates/psp.yaml delete mode 100644 bitnami/influxdb/templates/pvc-backup.yaml create mode 100644 bitnami/influxdb/templates/rbac.yaml delete mode 100644 bitnami/influxdb/templates/secrets-backup.yaml delete mode 100644 bitnami/influxdb/templates/secrets.yaml delete mode 100644 bitnami/influxdb/templates/service-collectd.yaml delete mode 100644 bitnami/influxdb/templates/service-metrics.yaml create mode 100644 bitnami/influxdb/templates/store-secret.yaml create mode 100644 bitnami/influxdb/templates/tls-secret.yaml create mode 100644 bitnami/influxdb/templates/vpa.yaml diff --git a/.vib/influxdb/cypress/cypress.config.js b/.vib/influxdb/cypress/cypress.config.js deleted file mode 100644 index ac947c12b4..0000000000 --- a/.vib/influxdb/cypress/cypress.config.js +++ /dev/null @@ -1,13 +0,0 @@ -module.exports = { - env: { - username: 'influxAdmin', - password: 'RootP4ssw0rd', - bucket: 'primary', - org: 'primary', - }, - defaultCommandTimeout: 30000, - e2e: { - setupNodeEvents(on, config) {}, - baseUrl: 'http://localhost', - }, -} diff --git a/.vib/influxdb/cypress/cypress/e2e/influxdb.cy.js b/.vib/influxdb/cypress/cypress/e2e/influxdb.cy.js deleted file mode 100644 index 044d936841..0000000000 --- a/.vib/influxdb/cypress/cypress/e2e/influxdb.cy.js +++ /dev/null @@ -1,55 +0,0 @@ -/* - * Copyright Broadcom, Inc. All Rights Reserved. 
- * SPDX-License-Identifier: APACHE-2.0 - */ - -/// -import { random, selectOrg } from '../support/utils'; - -it('can create a new bucket', () => { - cy.login(); - selectOrg(); - cy.visitInOrg('/load-data/buckets'); - cy.get('[data-testid="Create Bucket"]').click(); - cy.fixture('buckets').then((buckets) => { - cy.get('[data-testid="bucket-form-name"]').type( - `${buckets.newBucket.name}-${random}` - ); - cy.get('[data-testid="bucket-form-submit"]').click(); - cy.get(`[data-testid="bucket-card ${buckets.newBucket.name}-${random}"]`); - }); -}); - -it('allows to import and visualize new data to DB', () => { - const MAX_SAMPLE_VALUE = '2.43'; - cy.login(); - selectOrg(); - - // Import sample data into the DB - cy.visitInOrg('/load-data/file-upload/lp'); - cy.contains('[data-testid="list-item"]', Cypress.env('bucket')).click(); - cy.get('[type="file"]').selectFile( - 'cypress/fixtures/sample_data/glucose_levels.txt', - { force: true } - ); - cy.get('[data-testid="write-data--button"]').click(); - cy.contains('Written Successfully'); - - // Import a preconfigured dashboard to visualize sample data - cy.visitInOrg('/dashboards-list/'); - cy.get('[data-testid="page-control-bar"]').within(() => { - cy.get('[data-testid="add-resource-dropdown--button"]').click(); - cy.get('[data-testid="add-resource-dropdown--import"]').click(); - }) - const newDashboard = 'cypress/fixtures/dashboards/health_tracker.json'; - cy.readFile(newDashboard).then((obj) => { - obj[0].spec.name = `Health Tracker ${random}`; - cy.writeFile(newDashboard, obj); - }); - cy.get('[type="file"]').selectFile(newDashboard, { force: true }); - cy.get('[data-testid="submit-button Dashboard"]').click(); - cy.contains('Successfully imported'); - cy.visitInOrg('/dashboards-list'); - cy.contains(`Health Tracker ${random}`).click(); - cy.contains(MAX_SAMPLE_VALUE); -}); diff --git a/.vib/influxdb/cypress/cypress/fixtures/buckets.json b/.vib/influxdb/cypress/cypress/fixtures/buckets.json deleted file mode 100644 
index 8f50b1f52f..0000000000 --- a/.vib/influxdb/cypress/cypress/fixtures/buckets.json +++ /dev/null @@ -1,5 +0,0 @@ -{ - "newBucket": { - "name": "Super Bucket" - } -} diff --git a/.vib/influxdb/cypress/cypress/fixtures/dashboards/health_tracker.json b/.vib/influxdb/cypress/cypress/fixtures/dashboards/health_tracker.json deleted file mode 100644 index a882a160a7..0000000000 --- a/.vib/influxdb/cypress/cypress/fixtures/dashboards/health_tracker.json +++ /dev/null @@ -1,63 +0,0 @@ -[ - { - "apiVersion": "influxdata.com/v2alpha1", - "kind": "Dashboard", - "metadata": { - "name": "health-tracker-template" - }, - "spec": { - "charts": [ - { - "axes": [ - { - "base": "10", - "name": "x", - "scale": "linear" - }, - { - "base": "10", - "name": "y", - "scale": "linear" - } - ], - "colorizeRows": true, - "colors": [ - { - "id": "base", - "name": "laser", - "type": "text", - "hex": "#00C9FF" - } - ], - "decimalPlaces": 2, - "height": 4, - "hoverDimension": "auto", - "kind": "Single_Stat_Plus_Line", - "legendColorizeRows": true, - "legendOpacity": 1, - "legendOrientationThreshold": 100000000, - "name": "Glucose Levels", - "opacity": 1, - "orientationThreshold": 100000000, - "position": "overlaid", - "queries": [ - { - "query": "from(bucket: \"primary\")\n |> range(start: time(v: \"2022-08-16T22:00:00.000Z\"), stop: time(v: \"2022-08-17T21:00:00.929Z\"))\n |> filter(fn: (r) => r[\"_measurement\"] == \"health\")\n |> filter(fn: (r) => r[\"_field\"] == \"glucose\")\n |> aggregateWindow(every: v.windowPeriod, fn: max, createEmpty: false)\n |> yield(name: \"max\")" - } - ], - "staticLegend": { - "colorizeRows": true, - "opacity": 1, - "orientationThreshold": 100000000, - "widthRatio": 1 - }, - "width": 4, - "widthRatio": 1, - "xCol": "_time", - "yCol": "_value" - } - ], - "name": "Health Tracker" - } - } -] diff --git a/.vib/influxdb/cypress/cypress/fixtures/sample_data/glucose_levels.txt b/.vib/influxdb/cypress/cypress/fixtures/sample_data/glucose_levels.txt deleted file mode 
100644 index b6c2a76556..0000000000 --- a/.vib/influxdb/cypress/cypress/fixtures/sample_data/glucose_levels.txt +++ /dev/null @@ -1,4 +0,0 @@ -health,user=bitnami glucose=1 1660694480000000000 -health,user=bitnami glucose=0.7 1660694480000000000 -health,user=bitnami glucose=1.95 1660694480000000000 -health,user=bitnami glucose=2.43 1660694480000000000 \ No newline at end of file diff --git a/.vib/influxdb/cypress/cypress/support/commands.js b/.vib/influxdb/cypress/cypress/support/commands.js deleted file mode 100644 index 970b119a11..0000000000 --- a/.vib/influxdb/cypress/cypress/support/commands.js +++ /dev/null @@ -1,39 +0,0 @@ -/* - * Copyright Broadcom, Inc. All Rights Reserved. - * SPDX-License-Identifier: APACHE-2.0 - */ - -const COMMAND_DELAY = 2000; - -for (const command of ['click']) { - Cypress.Commands.overwrite(command, (originalFn, ...args) => { - const origVal = originalFn(...args); - - return new Promise((resolve) => { - setTimeout(() => { - resolve(origVal); - }, COMMAND_DELAY); - }); - }); -} - -Cypress.Commands.add('visitInOrg', (url) => { - // Retrieve current organization and use it to navigate - cy.url().then((currentUrl) => { - expect(currentUrl).to.contain('orgs'); - const org = currentUrl.match(/orgs\/(\w*)/)[1]; - const path = url.startsWith('/') ? 
url : `/${url}`; - cy.visit(`/orgs/${org}${path}`); - }); -}); - -Cypress.Commands.add( - 'login', - (username = Cypress.env('username'), password = Cypress.env('password')) => { - cy.visit('/signin'); - cy.get('[data-testid="username"]').should('be.enabled').type(username); - cy.get('[data-testid="password"]').should('be.enabled').type(`${password}{enter}`); - // The login process is not considered as completed until the UI is rendered - cy.get('[data-testid="user-nav"]'); - } -); diff --git a/.vib/influxdb/cypress/cypress/support/e2e.js b/.vib/influxdb/cypress/cypress/support/e2e.js deleted file mode 100644 index 56c00209c0..0000000000 --- a/.vib/influxdb/cypress/cypress/support/e2e.js +++ /dev/null @@ -1,25 +0,0 @@ -/* - * Copyright Broadcom, Inc. All Rights Reserved. - * SPDX-License-Identifier: APACHE-2.0 - */ - -// *********************************************************** -// This example support/index.js is processed and -// loaded automatically before your test files. -// -// This is a great place to put global configuration and -// behavior that modifies Cypress. -// -// You can change the location of this file or turn off -// automatically serving support files with the -// 'supportFile' configuration option. -// -// You can read more here: -// https://on.cypress.io/configuration -// *********************************************************** - -// Import commands.js using ES2015 syntax: -import './commands'; - -// Alternatively you can use CommonJS syntax: -// require('./commands') diff --git a/.vib/influxdb/cypress/cypress/support/utils.js b/.vib/influxdb/cypress/cypress/support/utils.js deleted file mode 100644 index 3a4162b113..0000000000 --- a/.vib/influxdb/cypress/cypress/support/utils.js +++ /dev/null @@ -1,15 +0,0 @@ -/* - * Copyright Broadcom, Inc. All Rights Reserved. 
- * SPDX-License-Identifier: APACHE-2.0 - */ - -/// - -export let random = (Math.random() + 1).toString(36).substring(7); - -export let selectOrg = (org = Cypress.env('org')) => { - cy.get('[data-testid="user-nav"]').click(); - cy.get('[data-testid="user-nav-item-switch-orgs"]').click(); - cy.contains('li', org).click(); - cy.contains('Get Started'); -}; diff --git a/.vib/influxdb/ginkgo/go.mod b/.vib/influxdb/ginkgo/go.mod index bf90d2756c..d7d1d40fd3 100644 --- a/.vib/influxdb/ginkgo/go.mod +++ b/.vib/influxdb/ginkgo/go.mod @@ -1,6 +1,6 @@ module test-influxdb-chart -go 1.23.0 +go 1.24.0 toolchain go1.24.1 @@ -10,50 +10,53 @@ require ( github.com/bitnami/charts/.vib/common-tests/ginkgo-utils v0.0.0-00010101000000-000000000000 github.com/onsi/ginkgo/v2 v2.23.3 github.com/onsi/gomega v1.36.2 - k8s.io/api v0.28.0 - k8s.io/apimachinery v0.28.0 - k8s.io/client-go v0.28.0 + k8s.io/api v0.33.1 + k8s.io/apimachinery v0.33.1 + k8s.io/client-go v0.33.1 ) require ( github.com/davecgh/go-spew v1.1.1 // indirect - github.com/emicklei/go-restful/v3 v3.9.0 // indirect + github.com/emicklei/go-restful/v3 v3.11.0 // indirect + github.com/fxamacker/cbor/v2 v2.7.0 // indirect github.com/go-logr/logr v1.4.2 // indirect - github.com/go-openapi/jsonpointer v0.19.6 // indirect + github.com/go-openapi/jsonpointer v0.21.0 // indirect github.com/go-openapi/jsonreference v0.20.2 // indirect - github.com/go-openapi/swag v0.22.3 // indirect + github.com/go-openapi/swag v0.23.0 // indirect github.com/go-task/slim-sprig/v3 v3.0.0 // indirect github.com/gogo/protobuf v1.3.2 // indirect - github.com/golang/protobuf v1.5.3 // indirect - github.com/google/gnostic-models v0.6.8 // indirect - github.com/google/go-cmp v0.6.0 // indirect - github.com/google/gofuzz v1.2.0 // indirect + github.com/google/gnostic-models v0.6.9 // indirect + github.com/google/go-cmp v0.7.0 // indirect github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad // indirect - github.com/google/uuid v1.3.0 // indirect - 
github.com/imdario/mergo v0.3.6 // indirect + github.com/google/uuid v1.6.0 // indirect + github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 // indirect github.com/josharian/intern v1.0.0 // indirect github.com/json-iterator/go v1.1.12 // indirect github.com/mailru/easyjson v0.7.7 // indirect + github.com/moby/spdystream v0.5.0 // indirect github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd // indirect github.com/modern-go/reflect2 v1.0.2 // indirect github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 // indirect + github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f // indirect + github.com/pkg/errors v0.9.1 // indirect github.com/spf13/pflag v1.0.5 // indirect + github.com/x448/float16 v0.8.4 // indirect golang.org/x/net v0.38.0 // indirect - golang.org/x/oauth2 v0.8.0 // indirect + golang.org/x/oauth2 v0.27.0 // indirect golang.org/x/sys v0.31.0 // indirect golang.org/x/term v0.30.0 // indirect golang.org/x/text v0.23.0 // indirect - golang.org/x/time v0.3.0 // indirect + golang.org/x/time v0.9.0 // indirect golang.org/x/tools v0.30.0 // indirect - google.golang.org/appengine v1.6.7 // indirect - google.golang.org/protobuf v1.36.1 // indirect + google.golang.org/protobuf v1.36.5 // indirect + gopkg.in/evanphx/json-patch.v4 v4.12.0 // indirect gopkg.in/inf.v0 v0.9.1 // indirect - gopkg.in/yaml.v2 v2.4.0 // indirect gopkg.in/yaml.v3 v3.0.1 // indirect - k8s.io/klog/v2 v2.100.1 // indirect - k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 // indirect - k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 // indirect - sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd // indirect - sigs.k8s.io/structured-merge-diff/v4 v4.2.3 // indirect - sigs.k8s.io/yaml v1.3.0 // indirect + k8s.io/klog/v2 v2.130.1 // indirect + k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff // indirect + k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 // indirect + sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 // indirect + 
sigs.k8s.io/randfill v1.0.0 // indirect + sigs.k8s.io/structured-merge-diff/v4 v4.6.0 // indirect + sigs.k8s.io/yaml v1.4.0 // indirect ) diff --git a/.vib/influxdb/ginkgo/go.sum b/.vib/influxdb/ginkgo/go.sum index da9c9da757..074607f7de 100644 --- a/.vib/influxdb/ginkgo/go.sum +++ b/.vib/influxdb/ginkgo/go.sum @@ -1,40 +1,39 @@ +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5 h1:0CwZNZbxp69SHPdPJAN/hZIm0C4OItdklCFmMRWYpio= +github.com/armon/go-socks5 v0.0.0-20160902184237-e75332964ef5/go.mod h1:wHh0iHkYZB8zMSxRWpUBQtwG5a7fFgvEO+odwuTv2gs= github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E= github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c= github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38= -github.com/emicklei/go-restful/v3 v3.9.0 h1:XwGDlfxEnQZzuopoqxwSEllNcCOM9DhhFyhFIIGKwxE= -github.com/emicklei/go-restful/v3 v3.9.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= -github.com/go-logr/logr v1.2.0/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A= +github.com/emicklei/go-restful/v3 v3.11.0 h1:rAQeMHw1c7zTmncogyy8VvRZwtkmkZ4FxERmMY4rD+g= +github.com/emicklei/go-restful/v3 v3.11.0/go.mod h1:6n3XBCmQQb25CM2LCACGz8ukIrRry+4bhvbpWn3mrbc= +github.com/fxamacker/cbor/v2 v2.7.0 h1:iM5WgngdRBanHcxugY4JySA0nk1wZorNOpTgCMedv5E= +github.com/fxamacker/cbor/v2 v2.7.0/go.mod h1:pxXPTn3joSm21Gbwsv0w9OSA2y1HFR9qXEeXQVeNoDQ= github.com/go-logr/logr v1.4.2 h1:6pFjapn8bFcIbiKo3XT4j/BhANplGihG6tvd+8rYgrY= github.com/go-logr/logr v1.4.2/go.mod h1:9T104GzyrTigFIr8wt5mBrctHMim0Nb2HLGrmQ40KvY= -github.com/go-openapi/jsonpointer v0.19.6 h1:eCs3fxoIi3Wh6vtgmLTOjdhSpiqphQ+DaPn38N2ZdrE= github.com/go-openapi/jsonpointer v0.19.6/go.mod h1:osyAmYz/mB/C3I+WsTTSgw1ONzaLJoLCyoi6/zppojs= +github.com/go-openapi/jsonpointer v0.21.0 h1:YgdVicSA9vH5RiHs9TZW5oyafXZFc6+2Vc1rr/O9oNQ= 
+github.com/go-openapi/jsonpointer v0.21.0/go.mod h1:IUyH9l/+uyhIYQ/PXVA41Rexl+kOkAPDdXEYns6fzUY= github.com/go-openapi/jsonreference v0.20.2 h1:3sVjiK66+uXK/6oQ8xgcRKcFgQ5KXa2KvnJRumpMGbE= github.com/go-openapi/jsonreference v0.20.2/go.mod h1:Bl1zwGIM8/wsvqjsOQLJ/SH+En5Ap4rVB5KVcIDZG2k= -github.com/go-openapi/swag v0.22.3 h1:yMBqmnQ0gyZvEb/+KzuWZOXgllrXT4SADYbvDaXHv/g= github.com/go-openapi/swag v0.22.3/go.mod h1:UzaqsxGiab7freDnrUUra0MwWfN/q7tE4j+VcZ0yl14= +github.com/go-openapi/swag v0.23.0 h1:vsEVJDUo2hPJ2tu0/Xc+4noaxyEffXNIs3cOULZ+GrE= +github.com/go-openapi/swag v0.23.0/go.mod h1:esZ8ITTYEsH1V2trKHjAN8Ai7xHb8RV+YSZ577vPjgQ= github.com/go-task/slim-sprig/v3 v3.0.0 h1:sUs3vkvUymDpBKi3qH1YSqBQk9+9D/8M2mN1vB6EwHI= github.com/go-task/slim-sprig/v3 v3.0.0/go.mod h1:W848ghGpv3Qj3dhTPRyJypKRiqCdHZiAzKg9hl15HA8= github.com/gogo/protobuf v1.3.2 h1:Ov1cvc58UF3b5XjBnZv7+opcTcQFZebYjWzi34vdm4Q= github.com/gogo/protobuf v1.3.2/go.mod h1:P1XiOD3dCwIKUDQYPy72D8LYyHL2YPYrpS2s69NZV8Q= -github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U= -github.com/golang/protobuf v1.5.0/go.mod h1:FsONVRAS9T7sI+LIUmWTfcYkHO4aIWwzhcaSAoJOfIk= -github.com/golang/protobuf v1.5.3 h1:KhyjKVUg7Usr/dYsdSqoFveMYd5ko72D+zANwlG1mmg= -github.com/golang/protobuf v1.5.3/go.mod h1:XVQd3VNwM+JqD3oG2Ue2ip4fOMUkwXdXDdiuN0vRsmY= -github.com/google/gnostic-models v0.6.8 h1:yo/ABAfM5IMRsS1VnXjTBvUb61tFIHozhlYvRgGre9I= -github.com/google/gnostic-models v0.6.8/go.mod h1:5n7qKqH0f5wFt+aWF8CW6pZLLNOfYuF5OpfBSENuI8U= -github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE= -github.com/google/go-cmp v0.6.0 h1:ofyhxvXcZhMsU5ulbFiLKl/XBFqE1GSq7atu8tAmTRI= -github.com/google/go-cmp v0.6.0/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/gnostic-models v0.6.9 h1:MU/8wDLif2qCXZmzncUQ/BOfxWfthHi63KqpoNbWqVw= +github.com/google/gnostic-models v0.6.9/go.mod h1:CiWsm0s6BSQd1hRn8/QmxqB6BesYcbSZxsz9b0KuDBw= +github.com/google/go-cmp 
v0.5.9/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY= +github.com/google/go-cmp v0.7.0 h1:wk8382ETsv4JYUZwIsn6YpYiWiBsYLSJiTsyBybVuN8= +github.com/google/go-cmp v0.7.0/go.mod h1:pXiqmnSA92OHEEa9HXL2W4E7lf9JzCmGVUdgjX3N/iU= github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= -github.com/google/gofuzz v1.2.0 h1:xRy4A+RhZaiKjJ1bPfwQ8sedCA+YS2YcCHW6ec7JMi0= -github.com/google/gofuzz v1.2.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad h1:a6HEuzUHeKH6hwfN/ZoQgRgVIWFJljSWa/zetS2WTvg= github.com/google/pprof v0.0.0-20241210010833-40e02aabc2ad/go.mod h1:vavhavw2zAxS5dIdcRluK6cSGGPlZynqzFM8NdvU144= -github.com/google/uuid v1.3.0 h1:t6JiXgmwXMjEs8VusXIJk2BXHsn+wx8BZdTaoZ5fu7I= -github.com/google/uuid v1.3.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= -github.com/imdario/mergo v0.3.6 h1:xTNEAn+kxVO7dTZGu0CegyqKZmoWFI0rF8UxjlB2d28= -github.com/imdario/mergo v0.3.6/go.mod h1:2EnlNZ0deacrJVfApfmtdGgDfMuh/nq6Ok1EcJh5FfA= +github.com/google/uuid v1.6.0 h1:NIvaJDMOsjHA8n1jAhLSgzrAzy1Hgr+hNrb57e+94F0= +github.com/google/uuid v1.6.0/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674 h1:JeSE6pjso5THxAzdVpqr6/geYxZytqFMBCOtn/ujyeo= +github.com/gorilla/websocket v1.5.4-0.20250319132907-e064f32e3674/go.mod h1:r4w70xmWCQKmi1ONH4KIaBptdivuRPyosB9RmPlGEwA= github.com/josharian/intern v1.0.0 h1:vlS4z54oSdjm0bgjRigI+G1HpF+tI+9rE5LLzOg8HmY= github.com/josharian/intern v1.0.0/go.mod h1:5DoeVV0s6jJacbCEi61lwdGj/aVlrQvzHFFd8Hwg//Y= github.com/json-iterator/go v1.1.12 h1:PV8peI4a0ysnczrg+LtxykD8LfKY9ML6u2jnxaEnrnM= @@ -50,6 +49,8 @@ github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY= github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE= github.com/mailru/easyjson v0.7.7 h1:UGYAvKxe3sBsEDzO8ZeWOSlIQfWFlxbzLZe7hwFURr0= github.com/mailru/easyjson v0.7.7/go.mod 
h1:xzfreul335JAWq5oZzymOObrkdz5UnU4kGfJJLY9Nlc= +github.com/moby/spdystream v0.5.0 h1:7r0J1Si3QO/kjRitvSLVVFUjxMEb/YLj6S9FF62JBCU= +github.com/moby/spdystream v0.5.0/go.mod h1:xBAYlnt/ay+11ShkdFKNAG7LsyK/tmNBVvVOwrfMgdI= github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd h1:TRLaZ9cD/w8PVh93nsPXa1VrQ6jlwL5oN8l14QlcNfg= github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q= @@ -57,25 +58,33 @@ github.com/modern-go/reflect2 v1.0.2 h1:xBagoLtFs94CBntxluKeaWgTMpvLxC4ur3nMaC9G github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822 h1:C3w9PqII01/Oq1c1nUAm88MOHcQC9l5mIlSMApZMrHA= github.com/munnerz/goautoneg v0.0.0-20191010083416-a7dc8b61c822/go.mod h1:+n7T8mK8HuQTcFwEeznm/DIxMOiR9yIdICNftLE1DvQ= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f h1:y5//uYreIhSUg3J1GEMiLbxo1LJaP8RfCpH6pymGZus= +github.com/mxk/go-flowrate v0.0.0-20140419014527-cca7078d478f/go.mod h1:ZdcZmHo+o7JKHSa8/e818NopupXU1YMK5fe1lsApnBw= github.com/onsi/ginkgo/v2 v2.23.3 h1:edHxnszytJ4lD9D5Jjc4tiDkPBZ3siDeJJkUZJJVkp0= github.com/onsi/ginkgo/v2 v2.23.3/go.mod h1:zXTP6xIp3U8aVuXN8ENK9IXRaTjFnpVB9mGmaSRvxnM= github.com/onsi/gomega v1.36.2 h1:koNYke6TVk6ZmnyHrCXba/T/MoLBXFjeC1PtvYgw0A8= github.com/onsi/gomega v1.36.2/go.mod h1:DdwyADRjrc825LhMEkD76cHR5+pUnjhUN8GlHlRPHzY= +github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4= +github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0= github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM= github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4= -github.com/rogpeppe/go-internal v1.10.0 h1:TMyTOH3F/DB16zRVcYyreMH6GnZZrwQVAoYjRBZyWFQ= 
-github.com/rogpeppe/go-internal v1.10.0/go.mod h1:UQnix2H7Ngw/k4C5ijL5+65zddjncjaFoBhdsK/akog= +github.com/rogpeppe/go-internal v1.13.1 h1:KvO1DLK/DRN07sQ1LQKScxyZJuNnedQ5/wKSR38lUII= +github.com/rogpeppe/go-internal v1.13.1/go.mod h1:uMEvuHeurkdAXX61udpOXGD/AzZDWNMNyH2VO9fmH0o= github.com/spf13/pflag v1.0.5 h1:iy+VFUOCP1a+8yFto/drg2CJ5u0yRoB7fZw3DKv/JXA= github.com/spf13/pflag v1.0.5/go.mod h1:McXfInJRrz4CZXVZOBLb0bTZqETkiAhM9Iw0y3An2Bg= github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME= github.com/stretchr/objx v0.4.0/go.mod h1:YvHI0jy2hoMjB+UWwv71VJQ9isScKT/TqJzVSSt89Yw= github.com/stretchr/objx v0.5.0/go.mod h1:Yh+to48EsGEfYuaHDzXPcE3xhTkx73EhmCGUpEOglKo= +github.com/stretchr/objx v0.5.2 h1:xuMeJ0Sdp5ZMRXx/aWO6RZxdr3beISkG5/G/aIRr3pY= +github.com/stretchr/objx v0.5.2/go.mod h1:FRsXN1f5AsAjCGJKqEizvkpNtU+EGNCLh3NxZ/8L+MA= github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI= github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg= github.com/stretchr/testify v1.8.0/go.mod h1:yNjHg4UonilssWZ8iaSj1OCr/vHnekPRkoO+kdMU+MU= github.com/stretchr/testify v1.8.1/go.mod h1:w2LPCIKwWwSfY2zedu0+kehJoqGctiVI29o6fzry7u4= -github.com/stretchr/testify v1.8.4 h1:CcVxjf3Q8PM0mHUKJCdn+eZZtm5yQwehR5yeSVQQcUk= -github.com/stretchr/testify v1.8.4/go.mod h1:sz/lmYIOXD/1dqDmKjjqLyZ2RngseejIcXlSw2iwfAo= +github.com/stretchr/testify v1.10.0 h1:Xv5erBjTwe/5IxqUQTdXv5kgmIvbHo3QQyRwhJsOfJA= +github.com/stretchr/testify v1.10.0/go.mod h1:r2ic/lqez/lEtzL7wO/rwa5dbSLXVDPFyf8C91i36aY= +github.com/x448/float16 v0.8.4 h1:qLwI1I70+NjRFUR3zs1JPUCgaCXSh3SW62uAKT1mSBM= +github.com/x448/float16 v0.8.4/go.mod h1:14CWIYCyZA/cWjXOioeEpHeN/83MdbZDRQHoFcYsOfg= github.com/yuin/goldmark v1.1.27/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= github.com/yuin/goldmark v1.2.1/go.mod h1:3hX8gzYuyVAZsxl0MRgGTJEmQBFcNTphYh9decYSb74= golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod 
h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w= @@ -84,14 +93,13 @@ golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPh golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA= golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg= -golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks= golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20200226121028-0de0cce0169b/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s= golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU= golang.org/x/net v0.38.0 h1:vRMAPTMaeGqVhG5QyLJHqNDwecKTomGeqbnfZyKlBI8= golang.org/x/net v0.38.0/go.mod h1:ivrbrMbzFq5J41QOQh0siUuly180yBYtLp+CKbEaFx8= -golang.org/x/oauth2 v0.8.0 h1:6dkIjl3j3LtZ/O3sTgZTMsLKSftL/B8Zgq4huOIIUu8= -golang.org/x/oauth2 v0.8.0/go.mod h1:yr7u4HXZRm1R1kBWqr/xKNqewf0plRYoB7sla+BCIXE= +golang.org/x/oauth2 v0.27.0 h1:da9Vo7/tDv5RH/7nZDz1eMGS/q1Vv1N/7FCrBhI9I3M= +golang.org/x/oauth2 v0.27.0/go.mod h1:onh5ek6nERTohokkhCD/y2cV4Do3fxFHFuAejCkRWT8= golang.org/x/sync v0.0.0-20190423024810-112230192c58/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20190911185100-cd5d95a43a6e/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM= @@ -103,12 +111,11 @@ golang.org/x/sys v0.31.0/go.mod h1:BJP2sWEmIv4KK5OTEluFJCKSidICx8ciO85XgH3Ak8k= golang.org/x/term v0.30.0 h1:PQ39fJZ+mfadBm0y5WlL4vlM7Sx1Hgf13sMIY2+QS9Y= golang.org/x/term v0.30.0/go.mod h1:NYYFdzHoI5wRh/h5tDMdMqCqPJZEuNqVR5xJLd/n67g= golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ= -golang.org/x/text 
v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk= golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ= golang.org/x/text v0.23.0 h1:D71I7dUrlY+VX0gQShAThNGHFxZ13dGLBHQLVl1mJlY= golang.org/x/text v0.23.0/go.mod h1:/BLNzu4aZCJ1+kcD0DNRotWKage4q2rGVAg4o22unh4= -golang.org/x/time v0.3.0 h1:rg5rLMjNzMS1RkNLzCG38eapWhnYLFYXDXj2gOlr8j4= -golang.org/x/time v0.3.0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ= +golang.org/x/time v0.9.0 h1:EsRrnYcQiGH+5FfbgvV4AP7qEZstoyrHB0DzarOQ4ZY= +golang.org/x/time v0.9.0/go.mod h1:3BpzKBy/shNhVucY/MWOyx10tF3SFh9QdLuxbVysPQM= golang.org/x/tools v0.0.0-20180917221912-90fa682c2a6e/go.mod h1:n7NCudcB/nEzxVGmLbDWY5pfWTLqBcC2KZ6jyYvM4mQ= golang.org/x/tools v0.0.0-20191119224855-298f0cb1881e/go.mod h1:b+2E5dAYhXwXZwtnZ6UAqBI28+e2cm9otk0dWdXHAEo= golang.org/x/tools v0.0.0-20200619180055-7c47624df98f/go.mod h1:EkVYQZoAsY45+roYkvgYkIh4xh/qjgUK9TdY2XT94GE= @@ -119,38 +126,36 @@ golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8T golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0= -google.golang.org/appengine v1.6.7 h1:FZR1q0exgwxzPzp/aF+VccGrSfxfPpkBqjIIEq3ru6c= -google.golang.org/appengine v1.6.7/go.mod h1:8WjMMxjGQR8xUklV/ARdw2HLXBOI7O7uCIDZVag1xfc= -google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw= -google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc= -google.golang.org/protobuf v1.36.1 h1:yBPeRvTftaleIgM3PZ/WBIZ7XM/eEYAaEyCwvyjq/gk= -google.golang.org/protobuf v1.36.1/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= +google.golang.org/protobuf v1.36.5 h1:tPhr+woSbjfYvY6/GPufUoYizxw1cF/yFoxJ2fmpwlM= 
+google.golang.org/protobuf v1.36.5/go.mod h1:9fA7Ob0pmnwhb644+1+CVWFRbNajQ6iRojtC/QF5bRE= gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c h1:Hei/4ADfdWqJk1ZMxUNpqntNwaWcugrBjAiHlqqRiVk= gopkg.in/check.v1 v1.0.0-20201130134442-10cb98267c6c/go.mod h1:JHkPIbrfpd72SG/EVd6muEfDQjcINNoR0C8j2r3qZ4Q= +gopkg.in/evanphx/json-patch.v4 v4.12.0 h1:n6jtcsulIzXPJaxegRbvFNNrZDjbij7ny3gmSPG+6V4= +gopkg.in/evanphx/json-patch.v4 v4.12.0/go.mod h1:p8EYWUEYMpynmqDbY58zCKCFZw8pRWMG4EsWvDvM72M= gopkg.in/inf.v0 v0.9.1 h1:73M5CoZyi3ZLMOyDlQh031Cx6N9NDJ2Vvfl76EDAgDc= gopkg.in/inf.v0 v0.9.1/go.mod h1:cWUDdTG/fYaXco+Dcufb5Vnc6Gp2YChqWtbxRZE0mXw= -gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI= -gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY= -gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ= gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= gopkg.in/yaml.v3 v3.0.1 h1:fxVm/GzAzEWqLHuvctI91KS9hhNmmWOoWu0XTYJS7CA= gopkg.in/yaml.v3 v3.0.1/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM= -k8s.io/api v0.28.0 h1:3j3VPWmN9tTDI68NETBWlDiA9qOiGJ7sdKeufehBYsM= -k8s.io/api v0.28.0/go.mod h1:0l8NZJzB0i/etuWnIXcwfIv+xnDOhL3lLW919AWYDuY= -k8s.io/apimachinery v0.28.0 h1:ScHS2AG16UlYWk63r46oU3D5y54T53cVI5mMJwwqFNA= -k8s.io/apimachinery v0.28.0/go.mod h1:X0xh/chESs2hP9koe+SdIAcXWcQ+RM5hy0ZynB+yEvw= -k8s.io/client-go v0.28.0 h1:ebcPRDZsCjpj62+cMk1eGNX1QkMdRmQ6lmz5BLoFWeM= -k8s.io/client-go v0.28.0/go.mod h1:0Asy9Xt3U98RypWJmU1ZrRAGKhP6NqDPmptlAzK2kMc= -k8s.io/klog/v2 v2.100.1 h1:7WCHKK6K8fNhTqfBhISHQ97KrnJNFZMcQvKp7gP/tmg= -k8s.io/klog/v2 v2.100.1/go.mod h1:y1WjHnz7Dj687irZUWR/WLkLc5N1YHtjLdmgWjndZn0= -k8s.io/kube-openapi v0.0.0-20230717233707-2695361300d9 h1:LyMgNKD2P8Wn1iAwQU5OhxCKlKJy0sHc+PcDwFB24dQ= -k8s.io/kube-openapi 
v0.0.0-20230717233707-2695361300d9/go.mod h1:wZK2AVp1uHCp4VamDVgBP2COHZjqD1T68Rf0CM3YjSM= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2 h1:qY1Ad8PODbnymg2pRbkyMT/ylpTrCM8P2RJ0yroCyIk= -k8s.io/utils v0.0.0-20230406110748-d93618cff8a2/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd h1:EDPBXCAspyGV4jQlpZSudPeMmr1bNJefnuqLsRAsHZo= -sigs.k8s.io/json v0.0.0-20221116044647-bc3834ca7abd/go.mod h1:B8JuhiUyNFVKdsE8h686QcCxMaH6HrOAZj4vswFpcB0= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3 h1:PRbqxJClWWYMNV1dhaG4NsibJbArud9kFxnAMREiWFE= -sigs.k8s.io/structured-merge-diff/v4 v4.2.3/go.mod h1:qjx8mGObPmV2aSZepjQjbmb2ihdVs8cGKBraizNC69E= -sigs.k8s.io/yaml v1.3.0 h1:a2VclLzOGrwOHDiV8EfBGhvjHvP46CtW5j6POvhYGGo= -sigs.k8s.io/yaml v1.3.0/go.mod h1:GeOyir5tyXNByN85N/dRIT9es5UQNerPYEKK56eTBm8= +k8s.io/api v0.33.1 h1:tA6Cf3bHnLIrUK4IqEgb2v++/GYUtqiu9sRVk3iBXyw= +k8s.io/api v0.33.1/go.mod h1:87esjTn9DRSRTD4fWMXamiXxJhpOIREjWOSjsW1kEHw= +k8s.io/apimachinery v0.33.1 h1:mzqXWV8tW9Rw4VeW9rEkqvnxj59k1ezDUl20tFK/oM4= +k8s.io/apimachinery v0.33.1/go.mod h1:BHW0YOu7n22fFv/JkYOEfkUYNRN0fj0BlvMFWA7b+SM= +k8s.io/client-go v0.33.1 h1:ZZV/Ks2g92cyxWkRRnfUDsnhNn28eFpt26aGc8KbXF4= +k8s.io/client-go v0.33.1/go.mod h1:JAsUrl1ArO7uRVFWfcj6kOomSlCv+JpvIsp6usAGefA= +k8s.io/klog/v2 v2.130.1 h1:n9Xl7H1Xvksem4KFG4PYbdQCQxqc/tTUyrgXaOhHSzk= +k8s.io/klog/v2 v2.130.1/go.mod h1:3Jpz1GvMt720eyJH1ckRHK1EDfpxISzJ7I9OYgaDtPE= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff h1:/usPimJzUKKu+m+TE36gUyGcf03XZEP0ZIKgKj35LS4= +k8s.io/kube-openapi v0.0.0-20250318190949-c8a335a9a2ff/go.mod h1:5jIi+8yX4RIb8wk3XwBo5Pq2ccx4FP10ohkbSKCZoK8= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738 h1:M3sRQVHv7vB20Xc2ybTt7ODCeFj6JSWYFzOFnYeS6Ro= +k8s.io/utils v0.0.0-20241104100929-3ea5e8cea738/go.mod h1:OLgZIPagt7ERELqWJFomSt595RzquPNLL48iOWgYOg0= +sigs.k8s.io/json v0.0.0-20241010143419-9aa6b5e7a4b3 h1:/Rv+M11QRah1itp8VhT6HoVx1Ray9eB4DBr+K+/sCJ8= +sigs.k8s.io/json 
v0.0.0-20241010143419-9aa6b5e7a4b3/go.mod h1:18nIHnGi6636UCz6m8i4DhaJ65T6EruyzmoQqI2BVDo= +sigs.k8s.io/randfill v0.0.0-20250304075658-069ef1bbf016/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/randfill v1.0.0 h1:JfjMILfT8A6RbawdsK2JXGBR5AQVfd+9TbzrlneTyrU= +sigs.k8s.io/randfill v1.0.0/go.mod h1:XeLlZ/jmk4i1HRopwe7/aU3H5n1zNUcX6TM94b3QxOY= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0 h1:IUA9nvMmnKWcj5jl84xn+T5MnlZKThmUW1TdblaLVAc= +sigs.k8s.io/structured-merge-diff/v4 v4.6.0/go.mod h1:dDy58f92j70zLsuZVuUX5Wp9vtxXpaZnkPGWeqDfCps= +sigs.k8s.io/yaml v1.4.0 h1:Mk1wCc2gy/F0THH0TAp1QYyJNzRm2KCLy3o5ASXVI5E= +sigs.k8s.io/yaml v1.4.0/go.mod h1:Ejl7/uTz7PSA4eKMyQCUTnhZYNmLIl+5c2lQPGR2BPY= diff --git a/.vib/influxdb/ginkgo/influxdb_suite_test.go b/.vib/influxdb/ginkgo/influxdb_suite_test.go index 2141e6598b..fd03130873 100644 --- a/.vib/influxdb/ginkgo/influxdb_suite_test.go +++ b/.vib/influxdb/ginkgo/influxdb_suite_test.go @@ -19,22 +19,16 @@ var ( kubeconfig string deployName string namespace string - username string - password string - token string - org string - bucket string + database string timeoutSeconds int timeout time.Duration ) func init() { flag.StringVar(&kubeconfig, "kubeconfig", "", "absolute path to the kubeconfig file") - flag.StringVar(&deployName, "name", "", "name of the primary statefulset") + flag.StringVar(&deployName, "name", "", "name of the Influxdb deployment") flag.StringVar(&namespace, "namespace", "", "namespace where the application is running") - flag.StringVar(&org, "org", "", "admin organization") - flag.StringVar(&token, "token", "", "token for accessing the installation") - flag.StringVar(&bucket, "bucket", "", "bucket for inserting the data") + flag.StringVar(&database, "database", "", "name of the database to be used") flag.IntVar(&timeoutSeconds, "timeout", 120, "timeout in seconds") timeout = time.Duration(timeoutSeconds) * time.Second } @@ -44,7 +38,7 @@ func TestInfluxdb(t *testing.T) { RunSpecs(t, "Influxdb 
Persistence Test Suite") } -func createJob(ctx context.Context, c kubernetes.Interface, name string, port string, image string, stmt string) error { +func createJob(ctx context.Context, c kubernetes.Interface, name, port, image, action, token, query string) error { securityContext := &v1.SecurityContext{ Privileged: &[]bool{false}[0], AllowPrivilegeEscalation: &[]bool{false}[0], @@ -73,24 +67,17 @@ func createJob(ctx context.Context, c kubernetes.Interface, name string, port st Image: image, Command: []string{ "bash", "-ec", - stmt}, + fmt.Sprintf("influxdb3 %s --database %s --host $INFLUX_HOST --token $ADMIN_TOKEN '%s'", action, database, query), + }, Env: []v1.EnvVar{ - { - Name: "INFLUX_TOKEN", - Value: token, - }, - { - Name: "INFLUX_ORG", - Value: org, - }, - { - Name: "INFLUX_BUCKET_NAME", - Value: bucket, - }, { Name: "INFLUX_HOST", Value: fmt.Sprintf("http://%s:%s", deployName, port), }, + { + Name: "ADMIN_TOKEN", + Value: token, + }, }, SecurityContext: securityContext, }, diff --git a/.vib/influxdb/ginkgo/influxdb_test.go b/.vib/influxdb/ginkgo/influxdb_test.go index 35b8e433b0..fc30831987 100644 --- a/.vib/influxdb/ginkgo/influxdb_test.go +++ b/.vib/influxdb/ginkgo/influxdb_test.go @@ -12,6 +12,7 @@ import ( batchv1 "k8s.io/api/batch/v1" metav1 "k8s.io/apimachinery/pkg/apis/meta/v1" "k8s.io/client-go/kubernetes" + "k8s.io/client-go/rest" ) const ( @@ -20,28 +21,28 @@ const ( var _ = Describe("Influxdb", Ordered, func() { var c *kubernetes.Clientset + var conf *rest.Config var ctx context.Context var cancel context.CancelFunc BeforeEach(func() { ctx, cancel = context.WithCancel(context.Background()) - conf := utils.MustBuildClusterConfig(kubeconfig) + conf = utils.MustBuildClusterConfig(kubeconfig) c = kubernetes.NewForConfigOrDie(conf) }) - When("an index is created and Influxdb is scaled down to 0 replicas and back up", func() { - It("should have access to the created index", func() { - - getAvailableReplicas := func(ss *appsv1.Deployment) int32 { 
return ss.Status.AvailableReplicas } + When("time series data is written and Influxdb is scaled down to 0 replicas and back up", func() { + It("should have access to query the written data", func() { + getAvailableReplicas := func(deploy *appsv1.Deployment) int32 { return deploy.Status.AvailableReplicas } getSucceededJobs := func(j *batchv1.Job) int32 { return j.Status.Succeeded } getOpts := metav1.GetOptions{} By("checking all the replicas are available") - ss, err := c.AppsV1().Deployments(namespace).Get(ctx, deployName, getOpts) + deploy, err := c.AppsV1().Deployments(namespace).Get(ctx, deployName, getOpts) Expect(err).NotTo(HaveOccurred()) - Expect(ss.Status.Replicas).NotTo(BeZero()) - origReplicas := *ss.Spec.Replicas + Expect(deploy.Status.Replicas).NotTo(BeZero()) + origReplicas := *deploy.Spec.Replicas Eventually(func() (*appsv1.Deployment, error) { return c.AppsV1().Deployments(namespace).Get(ctx, deployName, getOpts) @@ -53,18 +54,28 @@ var _ = Describe("Influxdb", Ordered, func() { port, err := utils.SvcGetPortByName(svc, "http") Expect(err).NotTo(HaveOccurred()) - image, err := utils.DplGetContainerImage(ss, "influxdb") + image, err := utils.DplGetContainerImage(deploy, "influxdb") Expect(err).NotTo(HaveOccurred()) + // Let's obtain the token from the InfluxDB secret + secret, err := c.CoreV1().Secrets(namespace).Get(ctx, "influxdb", getOpts) + Expect(err).NotTo(HaveOccurred()) + + // The token is stored in the secret as a base64 encoded string + tokenBytes, ok := secret.Data["admin-token"] + Expect(ok).To(BeTrue()) + + // We don't need to decode the string, the Go K8s client does it for us + token := string(tokenBytes) + // Use current time for allowing the test suite to repeat jobSuffix := time.Now().Format("20060102150405") - By("creating a job to create a new index") - createDBJobName := fmt.Sprintf("%s-put-%s", + By("creating a job to write data") + createDBJobName := fmt.Sprintf("%s-write-%s", deployName, jobSuffix) - indexName := 
fmt.Sprintf("test%s", jobSuffix) - err = createJob(ctx, c, createDBJobName, port, image, fmt.Sprintf("influx write 'cpu_error,host=bitnami-server value=\"%s\"'", indexName)) + err = createJob(ctx, c, createDBJobName, port, image, "write", token, `home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1747036800`) Expect(err).NotTo(HaveOccurred()) Eventually(func() (*batchv1.Job, error) { @@ -72,7 +83,7 @@ var _ = Describe("Influxdb", Ordered, func() { }, timeout, PollingInterval).Should(WithTransform(getSucceededJobs, Equal(int32(1)))) By("scaling down to 0 replicas") - ss, err = utils.DplScale(ctx, c, ss, 0) + deploy, err = utils.DplScale(ctx, c, deploy, 0) Expect(err).NotTo(HaveOccurred()) Eventually(func() (*appsv1.Deployment, error) { @@ -80,21 +91,21 @@ var _ = Describe("Influxdb", Ordered, func() { }, timeout, PollingInterval).Should(WithTransform(getAvailableReplicas, BeZero())) By("scaling up to the original replicas") - ss, err = utils.DplScale(ctx, c, ss, origReplicas) + deploy, err = utils.DplScale(ctx, c, deploy, origReplicas) Expect(err).NotTo(HaveOccurred()) Eventually(func() (*appsv1.Deployment, error) { return c.AppsV1().Deployments(namespace).Get(ctx, deployName, getOpts) }, timeout, PollingInterval).Should(WithTransform(getAvailableReplicas, Equal(origReplicas))) - By("creating a job check the index") - deleteDBJobName := fmt.Sprintf("%s-get-%s", + By("creating a job to query the data") + queryJobName := fmt.Sprintf("%s-query-%s", deployName, jobSuffix) - err = createJob(ctx, c, deleteDBJobName, port, image, fmt.Sprintf("influx query 'from(bucket:\"%s\") |> range(start:-20m)' | grep %s", bucket, indexName)) + err = createJob(ctx, c, queryJobName, port, image, "query", token, "SELECT * FROM home") Expect(err).NotTo(HaveOccurred()) Eventually(func() (*batchv1.Job, error) { - return c.BatchV1().Jobs(namespace).Get(ctx, deleteDBJobName, getOpts) + return c.BatchV1().Jobs(namespace).Get(ctx, queryJobName, getOpts) }, timeout, 
PollingInterval).Should(WithTransform(getSucceededJobs, Equal(int32(1)))) }) }) diff --git a/.vib/influxdb/goss/goss.yaml b/.vib/influxdb/goss/goss.yaml index 3fb63fe411..b6182f4e95 100644 --- a/.vib/influxdb/goss/goss.yaml +++ b/.vib/influxdb/goss/goss.yaml @@ -7,20 +7,35 @@ file: filetype: directory mode: "2775" owner: root + # This file shouldn't exist given the token is created + # using a K8s Job + /bitnami/influxdb/.token: + exists: false +addr: + tcp://influxdb:{{ .Vars.service.ports.http }}: + reachable: true + timeout: 10000 + tcp://127.0.0.1:{{ .Vars.containerPorts.http }}: + reachable: true + timeout: 10000 command: - {{- $org := .Vars.auth.user.org }} - {{- $bucket := .Vars.auth.user.bucket }} - {{- $port := .Vars.influxdb.service.ports.http }} - {{- $adminToken := .Vars.auth.admin.token }} - {{- $user := .Vars.auth.user.username }} - {{- $msg := printf "error_%s" (randAlpha 5) }} - influx-write-read: - exec: export INFLUX_TOKEN='{{ $adminToken }}' && influx write --host http://influxdb:{{ $port }} --org {{ $org }} --bucket {{ $bucket }} 'cpu_error,host=bitnami-server value="{{ $msg }}"' && export INFLUX_TOKEN=$(influx auth list | grep {{ $user }} | awk '{print $2}') && influx query --host http://influxdb:{{ $port }} --org {{ $org }} 'from(bucket:"{{ $bucket }}") |> range(start:-2m)' + influxdb3-show-databases: + exec: bash -c ". /opt/bitnami/scripts/influxdb-env.sh; influxdb3 show databases --host http://127.0.0.1:{{ .Vars.containerPorts.http }} --token \$INFLUXDB_ADMIN_TOKEN" exit-status: 0 stdout: - - "{{ $msg }}" - {{- $uid := .Vars.influxdb.containerSecurityContext.runAsUser }} - {{- $gid := .Vars.influxdb.podSecurityContext.fsGroup }} + - "{{ .Vars.databases }}" + influxdb3-write: + exec: bash -c ". 
/opt/bitnami/scripts/influxdb-env.sh; influxdb3 write --database {{ .Vars.databases }} --host http://influxdb:{{ .Vars.service.ports.http }} --token \$INFLUXDB_ADMIN_TOKEN 'home,room=Living\ Room temp=21.1,hum=35.9,co=0i 1747036800'" + exit-status: 0 + stdout: + - "success" + influxdb3-query: + exec: bash -c "sleep 3; . /opt/bitnami/scripts/influxdb-env.sh; influxdb3 query --database {{ .Vars.databases }} --host http://influxdb:{{ .Vars.service.ports.http }} --token \$INFLUXDB_ADMIN_TOKEN 'SELECT * FROM home'" + exit-status: 0 + stdout: + - "Living Room" + {{- $uid := .Vars.containerSecurityContext.runAsUser }} + {{- $gid := .Vars.podSecurityContext.fsGroup }} check-user-info: # The UID and GID should always be either the one specified as vars (always a bigger number that the default) # or the one randomly defined by openshift (larger values). Otherwise, the chart is still using the default value. diff --git a/.vib/influxdb/runtime-parameters.yaml b/.vib/influxdb/runtime-parameters.yaml index 1b913502df..fd3968c9b4 100644 --- a/.vib/influxdb/runtime-parameters.yaml +++ b/.vib/influxdb/runtime-parameters.yaml @@ -1,32 +1,154 @@ -auth: +objectStore: file +databases: foo +containerPorts: + http: 8181 +containerSecurityContext: enabled: true - admin: - username: influxAdmin - password: RootP4ssw0rd - token: 4dm1nT0k3n - org: primary - bucket: primary - createUserToken: true - user: - username: testUser - password: ComplicatedPassword123!4 - org: testOrganization - bucket: testBucket -influxdb: - service: - type: LoadBalancer - ports: - http: 80 - rpc: 8089 - containerPorts: - http: 8086 - rpc: 8089 - containerSecurityContext: - enabled: true - runAsUser: 1002 - podSecurityContext: - enabled: true - fsGroup: 1002 + runAsUser: 1002 +podSecurityContext: + enabled: true + fsGroup: 1002 +service: + type: LoadBalancer + ports: + http: 80 serviceAccount: create: true automountServiceAccountToken: true +# Block commented until 
https://github.com/influxdata/influxdb/issues/26425 is resolved +# objectStore: s3 +# bucket: influxdb +# s3: +# auth: +# accessKeyId: some-access-key-id +# secretAccessKey: some-secret-access-key +# existingSecret: seaweedfs +# defaultRegion: us-east-1 +# endpoint: http://seaweedfs:8333 +# args: +# - /opt/bitnami/scripts/influxdb/run.sh +# - --aws-allow-http +# - --aws-skip-signature +# extraDeploy: +# - | +# apiVersion: v1 +# kind: Secret +# metadata: +# name: seaweedfs +# labels: +# app.kubernetes.io/part-of: influxdb +# app.kubernetes.io/component: seaweedfs +# type: Opaque +# data: +# s3-access-key-id: {{ .Values.s3.auth.accessKeyId | b64enc | quote }} +# s3-secret-access-key: {{ .Values.s3.auth.secretAccessKey | b64enc | quote }} +# - apiVersion: apps/v1 +# kind: StatefulSet +# metadata: +# name: seaweedfs +# labels: +# app.kubernetes.io/part-of: influxdb +# app.kubernetes.io/component: seaweedfs +# spec: +# replicas: 1 +# selector: +# matchLabels: +# app.kubernetes.io/part-of: influxdb +# app.kubernetes.io/component: seaweedfs +# serviceName: seaweedfs +# template: +# metadata: +# labels: +# app.kubernetes.io/part-of: influxdb +# app.kubernetes.io/component: seaweedfs +# spec: +# initContainers: +# - name: auth-config-init +# image: docker.io/bitnami/seaweedfs:latest +# command: +# - bash +# args: +# - -ec +# - | +# cat > "/s3/config.json" <6.6.16 (2025-05-21) + +* [bitnami/influxdb] :zap: :arrow_up: Update dependency references (#33821) ([7d35e67](https://github.com/bitnami/charts/commit/7d35e670e5abfdb86f136ef4fd0882412b5e80e0)), closes [#33821](https://github.com/bitnami/charts/issues/33821) + +## 6.6.15 (2025-05-21) + +* [bitnami/influxdb] :zap: :arrow_up: Update dependency references (#33806) ([4a7dfd0](https://github.com/bitnami/charts/commit/4a7dfd0f7669ff4445772c6346c5e3b6bf0a92dc)), closes [#33806](https://github.com/bitnami/charts/issues/33806) ## 6.6.14 (2025-05-19) diff --git a/bitnami/influxdb/Chart.lock b/bitnami/influxdb/Chart.lock index 
a5ebaf4f17..da69afa74a 100644 --- a/bitnami/influxdb/Chart.lock +++ b/bitnami/influxdb/Chart.lock @@ -1,6 +1,6 @@ dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts - version: 2.31.0 -digest: sha256:c4c9af4e0ca23cf2c549e403b2a2bba2c53a3557cee23da09fa4cdf710044c2c -generated: "2025-05-06T10:20:01.557583475+02:00" + version: 2.31.1 +digest: sha256:38d6de6fd62a10417ff51df8b2c5e0cf294de80fe393c4e9f3247ca6433718fa +generated: "2025-05-15T12:55:06.81713+02:00" diff --git a/bitnami/influxdb/Chart.yaml b/bitnami/influxdb/Chart.yaml index 38d9f25491..7d60699151 100644 --- a/bitnami/influxdb/Chart.yaml +++ b/bitnami/influxdb/Chart.yaml @@ -4,33 +4,28 @@ annotations: category: Database images: | - - name: aws-cli - image: docker.io/bitnami/aws-cli:2.27.20-debian-12-r1 - - name: azure-cli - image: docker.io/bitnami/azure-cli:2.73.0-debian-12-r0 - - name: google-cloud-sdk - image: docker.io/bitnami/google-cloud-sdk:0.523.0-debian-12-r0 - name: influxdb - image: docker.io/bitnami/influxdb:2.7.11-debian-12-r20 + image: docker.io/bitnami/influxdb:3.0.3-debian-12-r2 - name: os-shell image: docker.io/bitnami/os-shell:12-debian-12-r45 + - name: kubectl + image: docker.io/bitnami/kubectl:1.33.1-debian-12-r2 licenses: Apache-2.0 tanzuCategory: service apiVersion: v2 -appVersion: 2.7.11 +appVersion: 3.0.3 dependencies: - name: common repository: oci://registry-1.docker.io/bitnamicharts tags: - bitnami-common version: 2.x.x -description: InfluxDB(TM) is an open source time-series database. It is a core component - of the TICK (Telegraf, InfluxDB(TM), Chronograf, Kapacitor) stack. +description: InfluxDB(TM) Core is an open source time-series database. It is a core component of the FDAP (Apache Flight, DataFusion, Arrow, and Parquet) stack.
home: https://bitnami.com icon: https://dyltqmyl993wv.cloudfront.net/assets/stacks/influxdb/img/influxdb-stack-220x234.png keywords: - influxdb -- tick +- fdap - database - timeseries maintainers: @@ -39,4 +34,4 @@ maintainers: name: influxdb sources: - https://github.com/bitnami/charts/tree/main/bitnami/influxdb -version: 6.6.16 +version: 7.0.0 diff --git a/bitnami/influxdb/README.md b/bitnami/influxdb/README.md index 986f6be43a..8c79dc1a45 100644 --- a/bitnami/influxdb/README.md +++ b/bitnami/influxdb/README.md @@ -1,10 +1,10 @@ - + -# Bitnami Stack for InfluxDB™ +# Bitnami Stack for InfluxDB™ Core -InfluxDB™ is an open source time-series database. It is a core component of the TICK (Telegraf, InfluxDB™, Chronograf, Kapacitor) stack. +InfluxDB™ Core is an open source time-series database. It is a core component of the FDAP (Apache Flight, DataFusion, Arrow, and Parquet) stack. -[Overview of InfluxDB™](https://www.influxdata.com/products/influxdb-overview) +[Overview of InfluxDB™ Core](https://www.influxdata.com/products/influxdb-overview) InfluxDB™ is a trademark owned by InfluxData, which is not affiliated with, and does not endorse, this site. @@ -14,7 +14,7 @@ InfluxDB™ is a trademark owned by InfluxData, which is not affiliated with helm install my-release oci://registry-1.docker.io/bitnamicharts/influxdb ``` -Looking to use InfluxDB™ in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. +Looking to use InfluxDB™ Core in production? Try [VMware Tanzu Application Catalog](https://bitnami.com/enterprise), the commercial edition of the Bitnami catalog. 
## Introduction @@ -25,7 +25,6 @@ This chart bootstraps a [influxdb](https://github.com/bitnami/containers/tree/ma - Kubernetes 1.23+ - Helm 3.8.0+ - PV provisioner support in the underlying infrastructure -- ReadWriteMany volumes for deployment scaling ## Installing the Chart @@ -43,46 +42,7 @@ These commands deploy influxdb on the Kubernetes cluster in the default configur ## Configuration and installation details -### Resource requests and limits - -Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. - -To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). - -### Update credentials - -Bitnami charts configure credentials at first boot. Any further change in the secrets or credentials require manual intervention. 
Follow these instructions: - -- Update the user password following [the upstream documentation](https://docs.influxdata.com/influxdb/v2/admin/users/change-password/) -- Update the password secret with the new values (replace the SECRET_NAME, ADMIN_PASSWORD and ADMIN_USER_TOKEN placeholders) - -```shell -kubectl create secret generic SECRET_NAME --from-literal=admin-user-password=PASSWORD --from-literal=admin-user-token=ADMIN_USER_TOKEN --dry-run -o yaml | kubectl apply -f - -``` - -### Prometheus metrics - -This chart can be integrated with Prometheus by setting `metrics.enabled` to `true`. This will expose the InfluxDB native Prometheus endpoint. Additionally, it will deploy a `metrics` service, which can be configured under the `metrics.service` section. This `metrics` service will have the necessary annotations to be automatically scraped by Prometheus. - -#### Prometheus requirements - -It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. - -#### Integration with Prometheus Operator - -The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `metrics.serviceMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: - -```text -no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" -``` - -Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. 
- -### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) - -It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. - -Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. +### Configure the way how to expose InfluxDB™ Core This chart installs a deployment with the following configuration: @@ -108,22 +68,62 @@ This chart installs a deployment with the following configuration: -------------- ``` -### Configure the way how to expose InfluxDB™ +- **Ingress**: The ingress controller must be installed in the Kubernetes cluster. Set `ingress.enabled=true` to expose InfluxDB™ Core through Ingress. +- **ClusterIP**: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. Set `service.type=ClusterIP` to choose this service type. +- **NodePort**: Exposes the service on each Node's IP at a static port (the NodePort). You'll be able to contact the NodePort service, from outside the cluster, by requesting `NodeIP:NodePort`. Set `service.type=NodePort` to choose this service type. +- **LoadBalancer**: Exposes the service externally using a cloud provider's load balancer. Set `service.type=LoadBalancer` to choose this service type. -- **Ingress**: The ingress controller must be installed in the Kubernetes cluster. Set `ingress.enabled=true` to expose InfluxDB™ through Ingress. -- **ClusterIP**: Exposes the service on a cluster-internal IP. Choosing this value makes the service only reachable from within the cluster. Set `influxdb.service.type=ClusterIP` to choose this service type. 
-- **NodePort**: Exposes the service on each Node's IP at a static port (the NodePort). You'll be able to contact the NodePort service, from outside the cluster, by requesting `NodeIP:NodePort`. Set `influxdb.service.type=NodePort` to choose this service type. -- **LoadBalancer**: Exposes the service externally using a cloud provider's load balancer. Set `influxdb.service.type=LoadBalancer` to choose this service type. +### Configure the Object Store -### Using custom configuration +InfluxDB™ Core supports different storage systems to store Parquet files (refer to [upstream documentation](https://docs.influxdata.com/influxdb3/core/reference/config-options/#object-store) for more information about the supported object stores) that we can divide into three categories: -This helm chart supports to customize the whole configuration file. +- Memory: This is the default object store. It stores all data in memory and is not persistent. This is suitable for testing and development purposes. +- File: This object store stores data in files on the local filesystem. +- Cloud: This object store stores data in a cloud provider's object storage service (e.g., AWS S3, Google Cloud Storage, Azure Blob Storage). -Add your custom configuration file to "files/conf" in your working directory. This file will be mounted as a configMap to the containers and it will be used for configuring InfluxDB™. +This chart allows you to configure the object store using the `objectStore` parameter. If you're using a Cloud storage, there are additional parameters to configure such as the Cloud specific credentials or the bucket name. -Alternatively, you can specify the InfluxDB™ configuration using the `influxdb.configuration` parameter. +### Resource requests and limits -In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `influxdb.existingConfiguration` parameter. Note that this will override the two previous options. 
+Bitnami charts allow setting resource requests and limits for all containers inside the chart deployment. These are inside the `resources` value (check parameter table). Setting requests is essential for production workloads and these should be adapted to your specific use case. + +To make this process easier, the chart contains the `resourcesPreset` values, which automatically sets the `resources` section according to different presets. Check these presets in [the bitnami/common chart](https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15). However, in production workloads using `resourcesPreset` is discouraged as it may not fully adapt to your specific needs. Find more information on container resource management in the [official Kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/). + +### Update credentials + +This chart supports creating a random admin token at first boot (not supported when using `memory` as object store) by setting both the `auth.enabled` and `createAdminTokenJob.enabled` parameters to `true`. +As an alternative, the chart also supports consuming credentials from an existing secret by setting the `auth.existingSecret` and `auth.existingSecretAdminTokenKey` parameters. However, please note that this is only supported if you have pre-populated data in your object store with an admin token already created. + +Any further change in the credentials requires manual intervention; please refer to the instructions below: + +- Create an admin token following [the upstream documentation](https://docs.influxdata.com/influxdb3/core/admin/tokens/admin/create) if no admin token was created during the first boot. +- Regenerate the admin token following [the upstream documentation](https://docs.influxdata.com/influxdb3/core/admin/tokens/admin/regenerate). + +> Note: please ensure you update the token in the secret used by the chart if you regenerate the token.
+ +### Prometheus metrics + +This chart can be integrated with Prometheus by setting `metrics.enabled` to `true`. This will add the required annotations on InfluxDB™ Core service to be automatically scraped by Prometheus. + +#### Prometheus requirements + +It is necessary to have a working installation of Prometheus or Prometheus Operator for the integration to work. Install the [Bitnami Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/prometheus) or the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) to easily have a working Prometheus in your cluster. + +#### Integration with Prometheus Operator + +The chart can deploy `ServiceMonitor` objects for integration with Prometheus Operator installations. To do so, set the value `metrics.serviceMonitor.enabled=true`. Ensure that the Prometheus Operator `CustomResourceDefinitions` are installed in the cluster or it will fail with the following error: + +```text +no matches for kind "ServiceMonitor" in version "monitoring.coreos.com/v1" +``` + +Install the [Bitnami Kube Prometheus helm chart](https://github.com/bitnami/charts/tree/main/bitnami/kube-prometheus) for having the necessary CRDs and the Prometheus Operator. + +### [Rolling VS Immutable tags](https://techdocs.broadcom.com/us/en/vmware-tanzu/application-catalog/tanzu-application-catalog/services/tac-doc/apps-tutorials-understand-rolling-tags-containers-index.html) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
### Adding extra environment variables @@ -137,23 +137,15 @@ extraEnvVars: ### Initialize a fresh instance -The [Bitnami InfluxDB™](https://github.com/bitnami/containers/tree/main/bitnami/influxdb) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. +The [Bitnami InfluxDB™ Core](https://github.com/bitnami/containers/tree/main/bitnami/influxdb) image allows you to use your custom scripts to initialize a fresh instance (the allowed extension is `.sh`). In order to execute the scripts, you can specify custom scripts using the `initdbScripts` parameter. -Alternatively, you can specify custom scripts using the `influxdb.initdbScripts` parameter. - -In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `influxdb.initdbScriptsCM` parameter. Note that this will override the two previous options. parameter. - -The allowed extensions are `.sh`, and `.txt`. - -### Migrating InfluxDB 1.x data into 2.x format - -The [Bitnami InfluxDB™](https://github.com/bitnami/containers/tree/main/bitnami/influxdb) image allows you to migrate your InfluxDB 1.x data into 2.x format by setting the `INFLUXDB_INIT_MODE=upgrade` environment variable, and mounting the InfluxDB 1.x data into the container (let the initialization logic know where it is located with the `INFLUXDB_INIT_V1_DIR` variable). Do not point `INFLUXDB_INIT_V1_DIR` into `INFLUXDB_VOLUME_DIR` (default: `/bitnami/influxdb`), or the upgrade process will fail. +In addition to this option, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsCM` parameter. Note that this will override the previous option.
### Setting Pod's affinity

-This chart allows you to set your custom affinity using the `XXX.affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).
+This chart allows you to set your custom affinity using the `affinity` parameter(s). Find more information about Pod's affinity in the [kubernetes documentation](https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity).

-As an alternative, you can use of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `XXX.podAffinityPreset`, `XXX.podAntiAffinityPreset`, or `XXX.nodeAffinityPreset` parameters.
+As an alternative, you can use one of the preset configurations for pod affinity, pod anti-affinity, and node affinity available at the [bitnami/common](https://github.com/bitnami/charts/tree/main/bitnami/common#affinities) chart. To do so, set the `podAffinityPreset`, `podAntiAffinityPreset`, or `nodeAffinityPreset` parameters.

### Backup and restore

@@ -161,20 +153,17 @@ To back up and restore Helm chart deployments on Kubernetes, you need to back up

## Persistence

-The data is persisted by default using PVC(s). You can disable the persistence setting the `persistence.enabled` parameter to `false`.
-A default `StorageClass` is needed in the Kubernetes cluster to dynamically provision the volumes. Specify another StorageClass in the `persistence.storageClass` or set `persistence.existingClaim` if you have already existing persistent volumes to use.
+When using `file` as object store, data can be persisted by default using PVC(s). You can disable persistence by setting the `persistence.enabled` parameter to `false`.
-If you would like to define persistence settings for a backup volume that differ from the persistence settings for the database volume, you may do so under the `backup.persistence` section of the configuration by setting `backup.persistence.ownConfig` to `true`. The backup volume will otherwise be defined using the `persistence` parameter section. +A default `StorageClass` is needed in the Kubernetes cluster to dynamically provision the volumes. Specify another StorageClass in the `persistence.storageClass` or set `persistence.existingClaim` if you have already existing persistent volumes to use. ### Adjust permissions of persistent volume mountpoint As the images run as non-root by default, it is necessary to adjust the ownership of the persistent volumes so that the containers can write data into it. -By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. -As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. You can enable this **initContainer** by setting `defaultInitContainers.volumePermissions.enabled` to `true`. -You can enable this **initContainer** by setting `volumePermissions.enabled` to `true`. -There are K8s distribution, such as OpenShift, where you can dynamically define the UID to run this **initContainer**. To do so, set the `volumePermissions.securityContext.runAsUser` to `auto`. 
+There are K8s distributions, such as OpenShift, where you can dynamically define the UID to run this **initContainer**. To do so, set the `defaultInitContainers.volumePermissions.securityContext.runAsUser` to `auto`.

## Parameters

@@ -185,320 +174,315 @@ There are K8s distribution, such as OpenShift, where you can dynamically define

| `global.imageRegistry` | Global Docker image registry | `""` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` |
| `global.defaultStorageClass` | Global default StorageClass for Persistent Volume(s) | `""` |
-| `global.storageClass` | DEPRECATED: use global.defaultStorageClass instead | `""` |
| `global.security.allowInsecureImages` | Allows skipping image verification | `false` |
| `global.compatibility.openshift.adaptSecurityContext` | Adapt the securityContext sections of the deployment to make them compatible with Openshift restricted-v2 SCC: remove runAsUser, runAsGroup and fsGroup and let the platform use their allowed default IDs.
Possible values: auto (apply if the detected running cluster is Openshift), force (perform the adaptation always), disabled (do not perform adaptation) | `auto` | ### Common parameters -| Name | Description | Value | -| ------------------------ | ----------------------------------------------------------------------------------------------------- | --------------- | -| `kubeVersion` | Force target Kubernetes version (using Helm capabilities if not set) | `""` | -| `nameOverride` | String to partially override influxdb.fullname template with a string (will prepend the release name) | `""` | -| `fullnameOverride` | String to fully override influxdb.fullname template with a string | `""` | -| `namespaceOverride` | String to fully override common.names.namespace | `""` | -| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | -| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | -| `commonLabels` | Labels to add to all deployed objects | `{}` | -| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | -| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | -| `diagnosticMode.command` | Command to override all containers in the deployment | `["sleep"]` | -| `diagnosticMode.args` | Args to override all containers in the deployment | `["infinity"]` | +| Name | Description | Value | +| ------------------------ | --------------------------------------------------------------------------------------- | --------------- | +| `kubeVersion` | Override Kubernetes version | `""` | +| `apiVersions` | Override Kubernetes API versions reported by .Capabilities | `[]` | +| `nameOverride` | String to partially override common.names.name | `""` | +| `fullnameOverride` | String to fully override common.names.fullname | `""` | +| `namespaceOverride` | String to fully override common.names.namespace | `""` | +| `clusterDomain` | Default Kubernetes cluster 
domain | `cluster.local` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `[]` | +| `usePasswordFiles` | Mount credentials as files instead of using environment variables | `true` | +| `diagnosticMode.enabled` | Enable diagnostic mode (all probes will be disabled and the command will be overridden) | `false` | +| `diagnosticMode.command` | Command to override all containers in the chart release | `["sleep"]` | +| `diagnosticMode.args` | Args to override all containers in the chart release | `["infinity"]` | -### InfluxDB(TM) parameters +### InfluxDB(TM) Core parameters -| Name | Description | Value | -| ------------------------------------------------------------ | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `image.registry` | InfluxDB(TM) image registry | `REGISTRY_NAME` | -| `image.repository` | InfluxDB(TM) image repository | `REPOSITORY_NAME/influxdb` | -| `image.digest` | InfluxDB(TM) image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `image.pullPolicy` | InfluxDB(TM) image pull policy | `IfNotPresent` | -| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `image.debug` | Specify if debug logs should be enabled | `false` | -| `auth.enabled` | Enable/disable authentication (Variable to keep compatibility with InfluxDB(TM) v1, in v2 it will be ignored) | `true` | -| `auth.usePasswordFiles` | Whether to use files to provide secrets instead of env vars. 
| `true` | -| `auth.admin.username` | InfluxDB(TM) admin user name | `admin` | -| `auth.admin.password` | InfluxDB(TM) admin user's password | `""` | -| `auth.admin.token` | InfluxDB(TM) admin user's token. Only valid with InfluxDB(TM) v2 | `""` | -| `auth.admin.org` | InfluxDB(TM) admin user's org. Only valid with InfluxDB(TM) v2 | `primary` | -| `auth.admin.bucket` | InfluxDB(TM) admin user's bucket. Only valid with InfluxDB(TM) v2 | `primary` | -| `auth.admin.retention` | InfluxDB(TM) admin user's bucket retention. Only valid with InfluxDB(TM) v2 | `""` | -| `auth.createUserToken` | Whether to create tokens for the different users. Take into account these tokens are going to be created by CLI randomly and they will not be accessible from a secret. See more influxdb 2.0 [auth ref](https://docs.influxdata.com/influxdb/v2.0/security/tokens/) | `false` | -| `auth.user.username` | Name for InfluxDB(TM) user with 'admin' privileges on the bucket specified at `auth.user.bucket` and `auth.user.org` or `auth.admin.org` | `""` | -| `auth.user.password` | InfluxDB(TM) password for `user.name` user | `""` | -| `auth.user.org` | Org to be created on first run | `""` | -| `auth.user.bucket` | Bucket to be created on first run | `""` | -| `auth.readUser.username` | Name for InfluxDB(TM) user with 'read' privileges on the bucket specified at `auth.user.bucket` | `""` | -| `auth.readUser.password` | InfluxDB(TM) password for `auth.readUser.username` user | `""` | -| `auth.writeUser.username` | Name for InfluxDB(TM) user with 'read' privileges on the bucket specified at `auth.user.bucket` | `""` | -| `auth.writeUser.password` | InfluxDB(TM) password for `auth.writeUser.username` user | `""` | -| `auth.existingSecret` | Name of existing Secret object with InfluxDB(TM) credentials (`auth.admin.password`, `auth.user.password`, `auth.readUser.password`, and `auth.writeUser.password` will be ignored and picked up from this secret) | `""` | -| `influxdb.configuration` | Specify content 
for influxdb.conf | `""` | -| `influxdb.existingConfiguration` | Name of existing ConfigMap object with the InfluxDB(TM) configuration (`influxdb.configuration` will be ignored). | `""` | -| `influxdb.initdbScripts` | Dictionary of initdb scripts | `{}` | -| `influxdb.initdbScriptsCM` | Name of existing ConfigMap object with the initdb scripts (`influxdb.initdbScripts` will be ignored). | `""` | -| `influxdb.initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`) | `""` | -| `influxdb.podAffinityPreset` | InfluxDB(TM) Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `influxdb.podAntiAffinityPreset` | InfluxDB(TM) Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `influxdb.nodeAffinityPreset.type` | InfluxDB(TM) Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `influxdb.nodeAffinityPreset.key` | InfluxDB(TM) Node label key to match Ignored if `affinity` is set. | `""` | -| `influxdb.nodeAffinityPreset.values` | InfluxDB(TM) Node label values to match. Ignored if `affinity` is set. 
| `[]` | -| `influxdb.affinity` | InfluxDB(TM) Affinity for pod assignment | `{}` | -| `influxdb.nodeSelector` | InfluxDB(TM) Node labels for pod assignment | `{}` | -| `influxdb.tolerations` | InfluxDB(TM) Tolerations for pod assignment | `[]` | -| `influxdb.podAnnotations` | Annotations for InfluxDB(TM) pods | `{}` | -| `influxdb.podLabels` | Extra labels for InfluxDB(TM) pods | `{}` | -| `influxdb.automountServiceAccountToken` | Mount Service Account token in pod | `false` | -| `influxdb.hostAliases` | InfluxDB(TM) pods host aliases | `[]` | -| `influxdb.revisionHistoryLimit` | InfluxDB(TM) statefulset/deployment revision history limit | `10` | -| `influxdb.updateStrategy.type` | InfluxDB(TM) statefulset/deployment strategy type | `RollingUpdate` | -| `influxdb.priorityClassName` | InfluxDB(TM) pods' priorityClassName | `""` | -| `influxdb.schedulerName` | Name of the k8s scheduler (other than default) | `""` | -| `influxdb.topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | -| `influxdb.podManagementPolicy` | podManagementPolicy to manage scaling operation of InfluxDB(TM) pods | `OrderedReady` | -| `influxdb.podSecurityContext.enabled` | Enabled InfluxDB(TM) pods' Security Context | `true` | -| `influxdb.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `influxdb.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `influxdb.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `influxdb.podSecurityContext.fsGroup` | Set InfluxDB(TM) pod's Security Context fsGroup | `1001` | -| `influxdb.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `influxdb.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `influxdb.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `influxdb.containerSecurityContext.runAsGroup` | Set 
containers' Security Context runAsGroup | `1001` | -| `influxdb.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `influxdb.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `influxdb.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | -| `influxdb.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `influxdb.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| `influxdb.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `influxdb.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). | `nano` | -| `influxdb.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `influxdb.command` | Override default container command (useful when using custom images) | `[]` | -| `influxdb.args` | Override default container args (useful when using custom images) | `[]` | -| `influxdb.lifecycleHooks` | for the InfluxDB(TM) container(s) to automate configuration before or after startup | `{}` | -| `influxdb.extraEnvVars` | Array containing extra env vars to configure InfluxDB(TM) | `[]` | -| `influxdb.extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for InfluxDB(TM) nodes | `""` | -| `influxdb.extraEnvVarsSecret` | Name of existing Secret containing extra env vars for InfluxDB(TM) nodes | `""` | -| `influxdb.extraVolumes` | Array of extra volumes to be added to the deployment (evaluated as template). 
Requires setting extraVolumeMounts | `[]` | -| `influxdb.extraVolumeMounts` | Array of extra volume mounts to be added to the container (evaluated as template). Normally used with extraVolumes. | `[]` | -| `influxdb.containerPorts.http` | InfluxDB(TM) container HTTP port | `8086` | -| `influxdb.containerPorts.rpc` | InfluxDB(TM) container RPC port | `8088` | -| `influxdb.startupProbe.enabled` | Enable startupProbe | `false` | -| `influxdb.startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `180` | -| `influxdb.startupProbe.periodSeconds` | Period seconds for startupProbe | `45` | -| `influxdb.startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `30` | -| `influxdb.startupProbe.failureThreshold` | Failure threshold for startupProbe | `6` | -| `influxdb.startupProbe.successThreshold` | Success threshold for startupProbe | `1` | -| `influxdb.livenessProbe.enabled` | Enable livenessProbe | `true` | -| `influxdb.livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `180` | -| `influxdb.livenessProbe.periodSeconds` | Period seconds for livenessProbe | `45` | -| `influxdb.livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `30` | -| `influxdb.livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `6` | -| `influxdb.livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | -| `influxdb.readinessProbe.enabled` | Enable readinessProbe | `true` | -| `influxdb.readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `60` | -| `influxdb.readinessProbe.periodSeconds` | Period seconds for readinessProbe | `45` | -| `influxdb.readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `30` | -| `influxdb.readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `6` | -| `influxdb.readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | -| `influxdb.customStartupProbe` | Override default startup 
probe | `{}` | -| `influxdb.customLivenessProbe` | Override default liveness probe | `{}` | -| `influxdb.customReadinessProbe` | Override default readiness probe | `{}` | -| `influxdb.sidecars` | Add additional sidecar containers to the InfluxDB(TM) pod(s) | `[]` | -| `influxdb.initContainers` | Add additional init containers to the InfluxDB(TM) pod(s) | `[]` | -| `influxdb.pdb.create` | Enable/disable a Pod Disruption Budget creation | `true` | -| `influxdb.pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `""` | -| `influxdb.pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable. Defaults to `1` if both `influxdb.pdb.minAvailable` and `influxdb.pdb.maxUnavailable` are empty. | `""` | -| `influxdb.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) | `ClusterIP` | -| `influxdb.service.ports.http` | InfluxDB(TM) HTTP port | `8086` | -| `influxdb.service.ports.rpc` | InfluxDB(TM) RPC port | `8088` | -| `influxdb.service.nodePorts` | Specify the nodePort(s) value for the LoadBalancer and NodePort service types. 
| `{}` | -| `influxdb.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `""` | -| `influxdb.service.loadBalancerSourceRanges` | Address that are allowed when service is LoadBalancer | `[]` | -| `influxdb.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `influxdb.service.externalTrafficPolicy` | InfluxDB(TM) service external traffic policy | `Cluster` | -| `influxdb.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `influxdb.service.annotations` | Annotations for InfluxDB(TM) service | `{}` | -| `influxdb.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `influxdb.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | - -### InfluxDB Collectd™ parameters - -| Name | Description | Value | -| ------------------------------------------- | ----------------------------------------------------------------------------------------- | ----------- | -| `collectd.enabled` | InfluxDB Collectd™ service enable | `false` | -| `collectd.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) | `ClusterIP` | -| `collectd.service.port` | InfluxDB Collectd™ UDP port (should match with corresponding port in influxdb.conf) | `25826` | -| `collectd.service.nodePort` | Kubernetes HTTP node port | `""` | -| `collectd.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `""` | -| `collectd.service.loadBalancerSourceRanges` | Address that are allowed when service is LoadBalancer | `[]` | -| `collectd.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `collectd.service.externalTrafficPolicy` | InfluxDB Collectd™ service external traffic policy | `Cluster` | -| `collectd.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `collectd.service.annotations` | Annotations for 
InfluxDB Collectd™ service | `{}` | -| `collectd.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `collectd.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| Name | Description | Value | +| --------------------------------------------------- | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `image.registry` | InfluxDB(TM) Core image registry | `REGISTRY_NAME` | +| `image.repository` | InfluxDB(TM) Core image repository | `REPOSITORY_NAME/influxdb` | +| `image.digest` | InfluxDB(TM) Core image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `image.pullPolicy` | InfluxDB(TM) Core image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `auth.enabled` | Enable bearer token authentication on InfluxDB(TM) Core server | `true` | +| `auth.existingSecret` | Name of existing Secret containing the admin token (only supported if store data is pre-populated) | `""` | +| `auth.existingSecretAdminTokenKey` | Name of the key inside the existing secret containing the admin token (admin-token as default if not provided) | `""` | +| `tls.enabled` | Enable TLS configuration for InfluxDB(TM) Core | `false` | +| `tls.autoGenerated.enabled` | Enable automatic generation of TLS certificates | `true` | +| `tls.autoGenerated.engine` | Mechanism to generate the certificates (allowed values: helm, cert-manager) | `helm` | +| `tls.autoGenerated.certManager.existingIssuer` | The name of an existing Issuer to use for generating the certificates (only for `cert-manager` engine) | `""` | +| 
`tls.autoGenerated.certManager.existingIssuerKind` | Existing Issuer kind, defaults to Issuer (only for `cert-manager` engine) | `""` | +| `tls.autoGenerated.certManager.keyAlgorithm` | Key algorithm for the certificates (only for `cert-manager` engine) | `RSA` | +| `tls.autoGenerated.certManager.keySize` | Key size for the certificates (only for `cert-manager` engine) | `2048` | +| `tls.autoGenerated.certManager.duration` | Duration for the certificates (only for `cert-manager` engine) | `2160h` | +| `tls.autoGenerated.certManager.renewBefore` | Renewal period for the certificates (only for `cert-manager` engine) | `360h` | +| `tls.ca` | CA certificate for TLS. Ignored if `tls.existingCASecret` is set | `""` | +| `tls.existingCASecret` | The name of an existing Secret containing the CA certificate for TLS | `""` | +| `tls.server.cert` | TLS certificate for InfluxDB(TM) Core servers. Ignored if `tls.server.existingSecret` is set | `""` | +| `tls.server.key` | TLS key for InfluxDB(TM) Core servers. 
Ignored if `tls.server.existingSecret` is set | `""` | +| `tls.server.existingSecret` | The name of an existing Secret containing the TLS certificates for InfluxDB(TM) Core servers | `""` | +| `objectStore` | InfluxDB(TM) Core object storage | `memory` | +| `nodeId` | InfluxDB(TM) Core node id | `0` | +| `databases` | Comma separated list of databases to create (ignored if `objectStore` is set to `memory`) | `""` | +| `bucket` | Name of the bucket to create (only when using a Cloud Provider for object storage) | `""` | +| `s3.auth.accessKeyId` | AWS S3 access key id | `""` | +| `s3.auth.secretAccessKey` | AWS S3 secret access key | `""` | +| `s3.auth.existingSecret` | Name of existing Secret containing AWS S3 credentials (overrides `s3.credentials.accessKeyId` and `s3.credentials.secretAccessKey`) | `""` | +| `s3.defaultRegion` | AWS S3 default region | `us-east-1` | +| `s3.endpoint` | AWS S3 endpoint | `""` | +| `google.auth.serviceAccountKey` | Google Cloud service account key (JSON format) | `""` | +| `google.auth.existingSecret` | Name of existing Secret containing Google Cloud credentials (overrides `google.auth.serviceAccountKey`) | `""` | +| `azure.auth.accessKey` | Microsoft Azure access key | `""` | +| `azure.auth.existingSecret` | Name of existing Secret containing Azure credentials (overrides `azure.credentials.accessKey`) | `""` | +| `azure.account` | Microsoft Azure account name | `""` | +| `replicaCount` | Number of InfluxDB(TM) Core replicas (ignored if `objectStore` is set to `file` or `memory`) | `1` | +| `initdbScripts` | Dictionary of initdb scripts | `{}` | +| `initdbScriptsCM` | Name of existing ConfigMap object with the initdb scripts (`initdbScripts` will be ignored). 
| `""` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`) | `""` | +| `command` | Override default container command (useful when using custom images) | `[]` | +| `args` | Override default container args (useful when using custom images) | `[]` | +| `extraEnvVars` | Array with extra environment variables to add InfluxDB(TM) Core nodes | `[]` | +| `extraEnvVarsCM` | Name of existing ConfigMap containing extra env vars for InfluxDB(TM) Core nodes | `""` | +| `extraEnvVarsSecret` | Name of existing Secret containing extra env vars for InfluxDB(TM) Core nodes | `""` | +| `containerPorts.http` | InfluxDB(TM) Core container HTTP port | `8181` | +| `extraContainerPorts` | Optionally specify extra list of additional ports for InfluxDB(TM) Core nodes | `[]` | +| `resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if resources is set (resources is recommended for production). 
| `small` | +| `resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `podSecurityContext.enabled` | Enable InfluxDB(TM) Core pods' Security Context | `true` | +| `podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | +| `podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | +| `podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | +| `podSecurityContext.fsGroup` | Set InfluxDB(TM) Core pod's Security Context fsGroup | `1001` | +| `containerSecurityContext.enabled` | Enable InfluxDB(TM) Core containers' Security Context | `true` | +| `containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| `startupProbe.enabled` | Enable startupProbe on InfluxDB(TM) Core containers | `false` | +| `startupProbe.initialDelaySeconds` | Initial delay seconds for startupProbe | `10` | +| `startupProbe.periodSeconds` | Period seconds for startupProbe | `10` | +| `startupProbe.timeoutSeconds` | Timeout seconds for startupProbe | `1` | 
+| `startupProbe.failureThreshold` | Failure threshold for startupProbe | `3` | +| `startupProbe.successThreshold` | Success threshold for startupProbe | `1` | +| `livenessProbe.enabled` | Enable livenessProbe on InfluxDB(TM) Core containers | `true` | +| `livenessProbe.initialDelaySeconds` | Initial delay seconds for livenessProbe | `10` | +| `livenessProbe.periodSeconds` | Period seconds for livenessProbe | `10` | +| `livenessProbe.timeoutSeconds` | Timeout seconds for livenessProbe | `1` | +| `livenessProbe.failureThreshold` | Failure threshold for livenessProbe | `3` | +| `livenessProbe.successThreshold` | Success threshold for livenessProbe | `1` | +| `readinessProbe.enabled` | Enable readinessProbe on InfluxDB(TM) Core containers | `true` | +| `readinessProbe.initialDelaySeconds` | Initial delay seconds for readinessProbe | `10` | +| `readinessProbe.periodSeconds` | Period seconds for readinessProbe | `10` | +| `readinessProbe.timeoutSeconds` | Timeout seconds for readinessProbe | `1` | +| `readinessProbe.failureThreshold` | Failure threshold for readinessProbe | `3` | +| `readinessProbe.successThreshold` | Success threshold for readinessProbe | `1` | +| `customStartupProbe` | Override default startup probe | `{}` | +| `customLivenessProbe` | Override default liveness probe | `{}` | +| `customReadinessProbe` | Override default readiness probe | `{}` | +| `podAffinityPreset` | InfluxDB(TM) Core Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `podAntiAffinityPreset` | InfluxDB(TM) Core Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | +| `nodeAffinityPreset.type` | InfluxDB(TM) Core Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `nodeAffinityPreset.key` | InfluxDB(TM) Core Node label key to match Ignored if `affinity` is set. 
| `""` | +| `nodeAffinityPreset.values` | InfluxDB(TM) Core Node label values to match. Ignored if `affinity` is set. | `[]` | +| `affinity` | InfluxDB(TM) Core Affinity for pod assignment | `{}` | +| `nodeSelector` | InfluxDB(TM) Core Node labels for pod assignment | `{}` | +| `tolerations` | InfluxDB(TM) Core Tolerations for pod assignment | `[]` | +| `podAnnotations` | Annotations for InfluxDB(TM) Core pods | `{}` | +| `podLabels` | Extra labels for InfluxDB(TM) Core pods | `{}` | +| `automountServiceAccountToken` | Mount Service Account token in pod | `false` | +| `hostAliases` | InfluxDB(TM) Core pods host aliases | `[]` | +| `updateStrategy.type` | InfluxDB(TM) Core deployment strategy type | `RollingUpdate` | +| `priorityClassName` | InfluxDB(TM) Core pods' priorityClassName | `""` | +| `revisionHistoryLimit` | InfluxDB(TM) Core deployment revision history limit | `10` | +| `schedulerName` | Name of the k8s scheduler (other than default) | `""` | +| `topologySpreadConstraints` | Topology Spread Constraints for pod assignment | `[]` | +| `lifecycleHooks` | for the InfluxDB(TM) Core container(s) to automate configuration before or after startup | `{}` | +| `extraVolumeMounts` | Optionally specify extra list of additional volumeMounts for the InfluxDB(TM) Core pods | `[]` | +| `extraVolumes` | Optionally specify extra list of additional volumes for the InfluxDB(TM) Core pods | `[]` | +| `sidecars` | Add additional sidecar containers to the InfluxDB(TM) Core pod(s) | `[]` | +| `initContainers` | Add additional init-containers to the InfluxDB(TM) Core pod(s) | `[]` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation for InfluxDB(TM) Core pods | `true` | +| `pdb.minAvailable` | Minimum number/percentage of InfluxDB(TM) Core pods that should remain scheduled | `""` | +| `pdb.maxUnavailable` | Maximum number/percentage of InfluxDB(TM) Core pods that may be made unavailable. 
Defaults to `1` if both `pdb.minAvailable` and `pdb.maxUnavailable` are empty. | `""` | +| `autoscaling.vpa.enabled` | Enable VPA for InfluxDB(TM) Core | `false` | +| `autoscaling.vpa.annotations` | Annotations for VPA resource | `{}` | +| `autoscaling.vpa.controlledResources` | List of resources that the VPA can control. Defaults to cpu and memory | `[]` | +| `autoscaling.vpa.maxAllowed` | VPA max allowed resources for the pod | `{}` | +| `autoscaling.vpa.minAllowed` | VPA min allowed resources for the pod | `{}` | +| `autoscaling.vpa.updatePolicy.updateMode` | Autoscaling update policy | `Auto` | +| `autoscaling.hpa.enabled` | Enable HPA for InfluxDB(TM) Core (ignored if `objectStore` is set to `file` or `memory`) | `false` | +| `autoscaling.hpa.minReplicas` | Minimum number of replicas | `""` | +| `autoscaling.hpa.maxReplicas` | Maximum number of replicas | `""` | +| `autoscaling.hpa.targetCPU` | Target CPU utilization percentage | `""` | +| `autoscaling.hpa.targetMemory` | Target Memory utilization percentage | `""` | ### Exposing parameters -| Name | Description | Value | -| -------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | -| `ingress.enabled` | Enable ingress controller resource | `false` | -| `ingress.tls` | Create TLS Secret | `false` | -| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | -| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | -| `ingress.hostname` | Default host for the ingress resource (evaluated as template) | `influxdb.local` | -| `ingress.path` | Ingress path*' in order to use this | `/` | -| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
| `{}` | -| `ingress.extraHosts` | The list of additional hostnames to be covered with this ingress record. | `[]` | -| `ingress.extraPaths` | Additional arbitrary path/backend objects | `[]` | -| `ingress.extraTls` | The tls configuration for additional hostnames to be covered with this ingress record. | `[]` | -| `ingress.secrets` | If you're providing your own certificates, please use this to add the certificates as secrets | `[]` | -| `ingress.ingressClassName` | IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) | `""` | -| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| Name | Description | Value | +| --------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------- | ------------------------ | +| `service.type` | Kubernetes service type | `ClusterIP` | +| `service.ports.http` | InfluxDB(TM) Core HTTP port | `8181` | +| `service.nodePorts.http` | Node port for HTTP | `""` | +| `service.sessionAffinity` | Control where client requests go, to the same pod or round-robin | `None` | +| `service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | +| `service.clusterIP` | InfluxDB(TM) Core service Cluster IP | `""` | +| `service.loadBalancerIP` | InfluxDB(TM) Core service Load Balancer IP | `""` | +| `service.loadBalancerSourceRanges` | InfluxDB(TM) service Load Balancer sources | `[]` | +| `service.externalTrafficPolicy` | InfluxDB(TM) Core service external traffic policy | `Cluster` | +| `service.extraPorts` | Extra port to expose on InfluxDB(TM) Core service | `[]` | +| `service.annotations` | Additional custom annotations for InfluxDB(TM) Core service | `{}` | +| `ingress.enabled` | Enable ingress record generation for InfluxDB(TM) Core | `false` | +| `ingress.ingressClassName` | IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) | `""` 
| +| `ingress.pathType` | Ingress path type | `ImplementationSpecific` | +| `ingress.apiVersion` | Force Ingress API version (automatically detected if not set) | `""` | +| `ingress.hostname` | Default host for the ingress record | `influxdb.local` | +| `ingress.path` | Default path for the ingress record | `/` | +| `ingress.annotations` | Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. | `{}` | +| `ingress.tls` | Enable TLS configuration for the host defined at `ingress.hostname` parameter | `false` | +| `ingress.selfSigned` | Create a TLS secret for this ingress record using self-signed certificates generated by Helm | `false` | +| `ingress.extraHosts` | An array with additional hostname(s) to be covered with the ingress record | `[]` | +| `ingress.extraPaths` | An array with additional arbitrary paths that may need to be added to the ingress under the main host | `[]` | +| `ingress.extraTls` | TLS configuration for additional hostname(s) to be covered with this ingress record | `[]` | +| `ingress.secrets` | Custom TLS certificates as secrets | `[]` | +| `ingress.extraRules` | Additional rules to be covered with this ingress record | `[]` | +| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | +| `networkPolicy.allowExternal` | Don't require server label for connections | `true` | +| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. 
| `true` | +| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | +| `networkPolicy.extraEgress` | Add extra egress rules to the NetworkPolicy | `[]` | +| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | ### Metrics parameters -| Name | Description | Value | -| --------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | -| `metrics.enabled` | Enable the export of Prometheus metrics | `false` | -| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) | `ClusterIP` | -| `metrics.service.port` | InfluxDB(TM) Prometheus port | `9122` | -| `metrics.service.nodePort` | Kubernetes HTTP node port | `""` | -| `metrics.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `""` | -| `metrics.service.loadBalancerSourceRanges` | Address that are allowed when service is LoadBalancer | `[]` | -| `metrics.service.clusterIP` | Static clusterIP or None for headless services | `""` | -| `metrics.service.annotations` | Annotations for the Prometheus metrics service | `{}` | -| `metrics.service.externalTrafficPolicy` | Service external traffic policy | `Cluster` | -| `metrics.service.extraPorts` | Extra ports to expose (normally used with the `sidecar` value) | `[]` | -| `metrics.service.sessionAffinity` | Session Affinity for Kubernetes service, can be "None" or "ClientIP" | `None` | -| `metrics.service.sessionAffinityConfig` | Additional settings for the sessionAffinity | `{}` | -| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | -| `metrics.serviceMonitor.namespace` | 
Namespace in which Prometheus is running | `""` | -| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `""` | -| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | -| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | -| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | -| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | -| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | -| `networkPolicy.enabled` | Specifies whether a NetworkPolicy should be created | `true` | -| `networkPolicy.allowExternal` | Don't require server label for connections | `true` | -| `networkPolicy.allowExternalEgress` | Allow the pod to access any range of port and all destinations. | `true` | -| `networkPolicy.extraIngress` | Add extra ingress rules to the NetworkPolicy | `[]` | -| `networkPolicy.extraEgress` | Add extra ingress rules to the NetworkPolicy | `[]` | -| `networkPolicy.ingressNSMatchLabels` | Labels to match to allow traffic from other namespaces | `{}` | -| `networkPolicy.ingressNSPodMatchLabels` | Pod labels to match to allow traffic from other namespaces | `{}` | -| `persistence.enabled` | Enable data persistence | `true` | -| `persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | -| `persistence.storageClass` | Specify the `storageClass` used to provision the volume | `""` | -| `persistence.accessModes` | Access mode of data volume | `["ReadWriteOnce"]` | -| `persistence.size` | Size of data volume | `8Gi` | -| `persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | -| `serviceAccount.name` | Name of the service account to use. 
If not set and create is true, a name is generated using the fullname template. | `""` | -| `serviceAccount.automountServiceAccountToken` | Automount service account token for the server service account | `false` | -| `serviceAccount.annotations` | Annotations for service account. Evaluated as a template. Only used if `create` is `true`. | `{}` | -| `psp.create` | Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later | `false` | -| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| Name | Description | Value | +| ------------------------------------------ | ------------------------------------------------------------------------------------------------------ | ------- | +| `metrics.enabled` | Enable the export of Prometheus metrics | `false` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace in which Prometheus is running | `""` | +| `metrics.serviceMonitor.labels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `""` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `""` | +| `metrics.serviceMonitor.relabelings` | RelabelConfigs to apply to samples before scraping | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | MetricRelabelConfigs to apply to samples before ingestion | `[]` | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `{}` | +| `metrics.serviceMonitor.honorLabels` | honorLabels chooses the metric's labels on collisions with target labels | `false` | -### Volume permissions parameters +### Persistence parameters -| Name | Description | Value | -| -------------------------------------------------- | --------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | -| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume mountpoint to `runAsUser:fsGroup` | `false` | -| `volumePermissions.image.registry` | Init container volume-permissions image registry | `REGISTRY_NAME` | -| `volumePermissions.image.repository` | Init container volume-permissions image name | `REPOSITORY_NAME/os-shell` | -| `volumePermissions.image.digest` | Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | -| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `IfNotPresent` | -| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `volumePermissions.securityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| Name | Description | Value | +| --------------------------- | ---------------------------------------------------------------------------------------------------------------------------------------------- | ------------------- | +| `persistence.enabled` | Enable InfluxDB(TM) Core data persistence (ignored unless `objectStore` is set to `file`) | `true` | +| `persistence.existingClaim` | A manually managed Persistent Volume and Claim | `""` | +| `persistence.storageClass` | PVC Storage Class for InfluxDB(TM) Core data volume | `""` | +| `persistence.accessModes` | Persistent Volume Access Modes | `["ReadWriteOnce"]` | +| `persistence.size` | PVC Storage Request for InfluxDB(TM) Core data volume | `8Gi` | +| `persistence.dataSource` | Custom PVC data source | `{}` | +| `persistence.annotations` | Additional custom annotations for the PVC | `{}` | +| `persistence.selector` | Selector to match an existing Persistent Volume for InfluxDB(TM) Core data PVC. 
If set, the PVC can't have a PV dynamically provisioned for it | `{}` | +| `persistence.mountPath` | Mount path of the InfluxDB(TM) Core data volume | `/bitnami/influxdb` | -### InfluxDB(TM) backup parameters +### Default init-containers -| Name | Description | Value | -| ------------------------------------------------------------------ | ----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | ------------------------------------ | -| `backup.enabled` | Enable InfluxDB(TM) backup | `false` | -| `backup.directory` | Directory where backups are stored | `/backups` | -| `backup.retentionDays` | Retention time in days for backups (older backups are deleted) | `10` | -| `backup.persistence.ownConfig` | Prefer independent own persistence parameters to configure the backup volume | `false` | -| `backup.persistence.enabled` | Enable data persistence for backup volume | `true` | -| `backup.persistence.existingClaim` | Use a existing PVC which must be created manually before bound | `""` | -| `backup.persistence.storageClass` | Specify the `storageClass` used to provision the volume | `""` | -| `backup.persistence.accessModes` | Access mode of data volume | `["ReadWriteOnce"]` | -| `backup.persistence.size` | Size of data volume | `8Gi` | -| `backup.persistence.annotations` | Persistent Volume Claim annotations | `{}` | -| `backup.cronjob.schedule` | Schedule in Cron format to save snapshots | `0 2 * * *` | -| `backup.cronjob.historyLimit` | Number of successful finished jobs to retain | `1` | -| `backup.cronjob.caBundle.enabled` | Boolean flag to enable/disable the inclusion of a CA bundle for backup CronJob. | `false` | -| `backup.cronjob.caBundle.existingConfigMap` | Name of the existing ConfigMap that contains the CA bundle for SSL Communication. 
| `""` | -| `backup.cronjob.caBundle.mountPath` | The path inside the CronJob container where the CA bundle will be mounted. | `/opt/ca-certificates/ca-bundle.crt` | -| `backup.cronjob.caBundle.subPath` | The filename within the mountPath directory where the CA bundle will be available. | `ca-bundle.crt` | -| `backup.cronjob.podAnnotations` | Pod annotations | `{}` | -| `backup.cronjob.podSecurityContext.enabled` | Enable security context for InfluxDB(TM) backup pods | `true` | -| `backup.cronjob.podSecurityContext.fsGroupChangePolicy` | Set filesystem group change policy | `Always` | -| `backup.cronjob.podSecurityContext.sysctls` | Set kernel settings using the sysctl interface | `[]` | -| `backup.cronjob.podSecurityContext.supplementalGroups` | Set filesystem extra groups | `[]` | -| `backup.cronjob.podSecurityContext.fsGroup` | Group ID for the InfluxDB(TM) filesystem | `1001` | -| `backup.cronjob.containerSecurityContext.enabled` | Enabled containers' Security Context | `true` | -| `backup.cronjob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | -| `backup.cronjob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | -| `backup.cronjob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | -| `backup.cronjob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | -| `backup.cronjob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | -| `backup.cronjob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | -| `backup.cronjob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | -| `backup.cronjob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | -| 
`backup.cronjob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | -| `backup.cronjob.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). | `none` | -| `backup.cronjob.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `backup.podAffinityPreset` | Backup ™ Pod affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `backup.podAntiAffinityPreset` | Backup™ Pod anti-affinity preset. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `soft` | -| `backup.nodeAffinityPreset.type` | Backup™ Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` | `""` | -| `backup.nodeAffinityPreset.key` | Backup™ Node label key to match Ignored if `affinity` is set. | `""` | -| `backup.nodeAffinityPreset.values` | Backup™ Node label values to match. Ignored if `affinity` is set. 
| `[]` | -| `backup.affinity` | Backup™ Affinity for backup pod assignment | `{}` | -| `backup.nodeSelector` | Backup™ Node labels for backup pod assignment | `{}` | -| `backup.tolerations` | Backup™ Tolerations for backup pod assignment | `[]` | -| `backup.uploadProviders.google.enabled` | enable upload to google storage bucket | `false` | -| `backup.uploadProviders.google.secret` | json secret with serviceaccount data to access Google storage bucket | `""` | -| `backup.uploadProviders.google.secretKey` | service account secret key name | `key.json` | -| `backup.uploadProviders.google.existingSecret` | Name of existing secret object with Google serviceaccount json credentials | `""` | -| `backup.uploadProviders.google.bucketName` | google storage bucket name name | `gs://bucket/influxdb` | -| `backup.uploadProviders.google.image.registry` | Google Cloud SDK image registry | `REGISTRY_NAME` | -| `backup.uploadProviders.google.image.repository` | Google Cloud SDK image name | `REPOSITORY_NAME/google-cloud-sdk` | -| `backup.uploadProviders.google.image.digest` | Google Cloud SDK image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `backup.uploadProviders.google.image.pullPolicy` | Google Cloud SDK image pull policy | `IfNotPresent` | -| `backup.uploadProviders.google.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `backup.uploadProviders.google.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). 
| `none` | -| `backup.uploadProviders.google.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `backup.uploadProviders.azure.enabled` | Enable upload to azure storage container | `false` | -| `backup.uploadProviders.azure.secret` | Secret with credentials to access Azure storage | `""` | -| `backup.uploadProviders.azure.secretKey` | Service account secret key name | `connection-string` | -| `backup.uploadProviders.azure.existingSecret` | Name of existing secret object | `""` | -| `backup.uploadProviders.azure.containerName` | Destination container | `influxdb-container` | -| `backup.uploadProviders.azure.image.registry` | Azure CLI image registry | `REGISTRY_NAME` | -| `backup.uploadProviders.azure.image.repository` | Azure CLI image repository | `REPOSITORY_NAME/azure-cli` | -| `backup.uploadProviders.azure.image.digest` | Azure CLI image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `backup.uploadProviders.azure.image.pullPolicy` | Azure CLI image pull policy | `IfNotPresent` | -| `backup.uploadProviders.azure.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `backup.uploadProviders.azure.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). 
| `none` | -| `backup.uploadProviders.azure.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | -| `backup.uploadProviders.aws.enabled` | Enable upload to aws s3 bucket | `false` | -| `backup.uploadProviders.aws.accessKeyID` | Access Key ID to access aws s3 | `""` | -| `backup.uploadProviders.aws.secretAccessKey` | Secret Access Key to access aws s3 | `""` | -| `backup.uploadProviders.aws.region` | Region of aws s3 bucket | `us-east-1` | -| `backup.uploadProviders.aws.existingSecret` | Name of existing secret object | `""` | -| `backup.uploadProviders.aws.bucketName` | aws s3 bucket name | `s3://bucket/influxdb` | -| `backup.uploadProviders.aws.endpoint` | aws s3 endpoint, no value default public endpoint aws s3 endpoint | `""` | -| `backup.uploadProviders.aws.usePasswordFiles` | Mount aws s3 credentials as files instead of using environment variables | `true` | -| `backup.uploadProviders.aws.image.registry` | AWS CLI image registry | `REGISTRY_NAME` | -| `backup.uploadProviders.aws.image.repository` | AWS CLI image repository | `REPOSITORY_NAME/aws-cli` | -| `backup.uploadProviders.aws.image.digest` | AWS CLI image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | -| `backup.uploadProviders.aws.image.pullPolicy` | AWS CLI image pull policy | `IfNotPresent` | -| `backup.uploadProviders.aws.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | -| `backup.uploadProviders.aws.resourcesPreset` | Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). 
| `none` | -| `backup.uploadProviders.aws.resources` | Set container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| Name | Description | Value | +| ------------------------------------------------------------------------------------------- | -------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------- | -------------------------- | +| `defaultInitContainers.volumePermissions.enabled` | Enable init-container that changes the owner and group of the persistent volume | `false` | +| `defaultInitContainers.volumePermissions.image.registry` | "volume-permissions" init-containers' image registry | `REGISTRY_NAME` | +| `defaultInitContainers.volumePermissions.image.repository` | "volume-permissions" init-containers' image repository | `REPOSITORY_NAME/os-shell` | +| `defaultInitContainers.volumePermissions.image.digest` | "volume-permissions" init-containers' image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag | `""` | +| `defaultInitContainers.volumePermissions.image.pullPolicy` | "volume-permissions" init-containers' image pull policy | `IfNotPresent` | +| `defaultInitContainers.volumePermissions.image.pullSecrets` | "volume-permissions" init-containers' image pull secrets | `[]` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.enabled` | Enable "volume-permissions" init-containers' Security Context | `true` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions` | Set SELinux options in "volume-permissions" init-containers | `{}` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser` | Set runAsUser in "volume-permissions" init-containers' Security Context | `0` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.privileged` | Set privileged in "volume-permissions" init-containers' Security Context | `false` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.allowPrivilegeEscalation` | Set allowPrivilegeEscalation in "volume-permissions" init-containers' Security Context | `false` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.add` | List of capabilities to be added in "volume-permissions" init-containers | `[]` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped in "volume-permissions" init-containers | `["ALL"]` | +| `defaultInitContainers.volumePermissions.containerSecurityContext.seccompProfile.type` | Set seccomp profile in "volume-permissions" init-containers | `RuntimeDefault` | +| `defaultInitContainers.volumePermissions.resourcesPreset` | Set InfluxDB(TM) Core "volume-permissions" init-container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). 
This is ignored if defaultInitContainers.volumePermissions.resources is set (defaultInitContainers.volumePermissions.resources is recommended for production). | `nano` | +| `defaultInitContainers.volumePermissions.resources` | Set InfluxDB(TM) Core "volume-permissions" init-container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | + +### Other Parameters + +| Name | Description | Value | +| --------------------------------------------- | ------------------------------------------------------------------------------------------ | ------- | +| `serviceAccount.create` | Enable creation of ServiceAccount for InfluxDB(TM) Core pods | `true` | +| `serviceAccount.name` | Name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `false` | +| `serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | +| `rbac.create` | Whether to create & use RBAC resources or not | `true` | + +### InfluxDB(TM) Core "create-admin-token" K8s Job parameters + +| Name | Description | Value | +| ----------------------------------------------------------------------- | ------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------ | -------------------------- | +| `createAdminTokenJob.enabled` | Whether to create a random admin token using a K8s job (ignored if `objectStore` is set to `memory` or `auth.enabled` is set to `false`). 
Warning: do not use this feature if Helm hooks aren't supported in your environment | `true` | +| `createAdminTokenJob.image.registry` | Kubectl image registry | `REGISTRY_NAME` | +| `createAdminTokenJob.image.repository` | Kubectl image repository | `REPOSITORY_NAME/os-shell` | +| `createAdminTokenJob.image.digest` | Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag | `""` | +| `createAdminTokenJob.image.pullPolicy` | Kubectl image pull policy | `IfNotPresent` | +| `createAdminTokenJob.image.pullSecrets` | Kubectl image pull secrets | `[]` | +| `createAdminTokenJob.backoffLimit` | set backoff limit of the job | `10` | +| `createAdminTokenJob.containerSecurityContext.enabled` | Enable InfluxDB(TM) Core "create-admin-token" job's containers' Security Context | `true` | +| `createAdminTokenJob.containerSecurityContext.seLinuxOptions` | Set SELinux options in container | `{}` | +| `createAdminTokenJob.containerSecurityContext.runAsUser` | Set containers' Security Context runAsUser | `1001` | +| `createAdminTokenJob.containerSecurityContext.runAsGroup` | Set containers' Security Context runAsGroup | `1001` | +| `createAdminTokenJob.containerSecurityContext.runAsNonRoot` | Set container's Security Context runAsNonRoot | `true` | +| `createAdminTokenJob.containerSecurityContext.privileged` | Set container's Security Context privileged | `false` | +| `createAdminTokenJob.containerSecurityContext.readOnlyRootFilesystem` | Set container's Security Context readOnlyRootFilesystem | `true` | +| `createAdminTokenJob.containerSecurityContext.allowPrivilegeEscalation` | Set container's Security Context allowPrivilegeEscalation | `false` | +| `createAdminTokenJob.containerSecurityContext.capabilities.drop` | List of capabilities to be dropped | `["ALL"]` | +| `createAdminTokenJob.containerSecurityContext.seccompProfile.type` | Set container's Security Context seccomp profile | `RuntimeDefault` | +| 
`createAdminTokenJob.resourcesPreset` | Set InfluxDB(TM) Core "create-admin-token" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if createAdminTokenJob.resources is set (createAdminTokenJob.resources is recommended for production). | `nano` | +| `createAdminTokenJob.resources` | Set InfluxDB(TM) Core "create-admin-token" job's container requests and limits for different resources like CPU or memory (essential for production workloads) | `{}` | +| `createAdminTokenJob.automountServiceAccountToken` | Mount Service Account token in InfluxDB(TM) Core "create-admin-token" job's pods | `true` | +| `createAdminTokenJob.hostAliases` | Add deployment host aliases | `[]` | +| `createAdminTokenJob.annotations` | Add annotations to the InfluxDB(TM) Core "create-admin-token" job | `{}` | +| `createAdminTokenJob.podLabels` | Additional pod labels for InfluxDB(TM) Core "create-admin-token" job | `{}` | +| `createAdminTokenJob.podAnnotations` | Additional pod annotations for InfluxDB(TM) Core "create-admin-token" job | `{}` | +| `createAdminTokenJob.topologyKey` | Override common lib default topology key. If empty - "kubernetes.io/hostname" is used | `""` | +| `createAdminTokenJob.affinity` | Affinity for InfluxDB(TM) Core create-admin-token pods assignment (evaluated as a template) | `{}` | +| `createAdminTokenJob.nodeAffinityPreset.key` | Node label key to match. Ignored if `createAdminTokenJob.affinity` is set. | `""` | +| `createAdminTokenJob.nodeAffinityPreset.type` | Node affinity preset type. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard` | `""` | +| `createAdminTokenJob.nodeAffinityPreset.values` | Node label values to match. Ignored if `createAdminTokenJob.affinity` is set. 
| `[]` | +| `createAdminTokenJob.nodeSelector` | Node labels for InfluxDB(TM) Core create-admin-token pods assignment | `{}` | +| `createAdminTokenJob.podAffinityPreset` | Pod affinity preset. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard`. | `""` | +| `createAdminTokenJob.podAntiAffinityPreset` | Pod anti-affinity preset. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard`. | `soft` | +| `createAdminTokenJob.tolerations` | Tolerations for InfluxDB(TM) Core create-admin-token pods assignment | `[]` | +| `createAdminTokenJob.topologySpreadConstraints` | Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. Evaluated as a template | `[]` | +| `createAdminTokenJob.priorityClassName` | Priority Class Name | `""` | +| `createAdminTokenJob.schedulerName` | Use an alternate scheduler, e.g. "stork". | `""` | +| `createAdminTokenJob.terminationGracePeriodSeconds` | Seconds InfluxDB(TM) Core create-admin-token pod needs to terminate gracefully | `""` | +| `createAdminTokenJob.serviceAccount.create` | Enable creation of ServiceAccount for InfluxDB(TM) Core create-admin-token pods | `true` | +| `createAdminTokenJob.serviceAccount.name` | Name of the service account to use. If not set and `create` is `true`, a name is generated | `""` | +| `createAdminTokenJob.serviceAccount.automountServiceAccountToken` | Allows auto mount of ServiceAccountToken on the serviceAccount created | `true` | +| `createAdminTokenJob.serviceAccount.annotations` | Additional custom annotations for the ServiceAccount | `{}` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set objectStore=file \ + oci://REGISTRY_NAME/REPOSITORY_NAME/influxdb +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. 
For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. + +The above command sets the InfluxDB™ Core object store to `file`. Alternatively, a YAML file that specifies the values for the above parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml oci://REGISTRY_NAME/REPOSITORY_NAME/influxdb +``` + +> Note: You need to substitute the placeholders `REGISTRY_NAME` and `REPOSITORY_NAME` with a reference to your Helm chart registry and repository. For example, in the case of Bitnami, you need to use `REGISTRY_NAME=registry-1.docker.io` and `REPOSITORY_NAME=bitnamicharts`. +> **Tip**: You can use the default [values.yaml](https://github.com/bitnami/charts/tree/main/bitnami/influxdb/values.yaml) ## Troubleshooting @@ -506,6 +490,18 @@ Find more information about how to deal with common errors related to Bitnami's ## Upgrading +### To 7.0.0 + +This chart major version bumps the InfluxDB™ major version to the `3.x` series. Please note InfluxDB™ Core 3 uses a completely different architecture and data engine (moving from TSM to Apache Arrow and Parquet stored on S3-compatible systems). Due to these architecture changes, the chart will be exclusively compatible with `3.x` container images from now on. + +There's no upgrade path from the previous release. Quoting [this upstream blog post](https://www.influxdata.com/blog/influxdb-3-oss-ga/): + +> Since InfluxDB 3 Core is designed specifically for recent data (72 hours), our recommendation for migration is to mirror writes from older versions to a new InfluxDB 3 Core instance for a transition period, then switch over entirely after 72 hours. + +In this major version we also removed support for overriding configuration via configuration files, given InfluxDB™ Core 3 is designed to be configured via CLI flags and environment variables exclusively. 
Backup jobs were also removed, as storage should be managed by the object store in the new architecture. + +Finally, this major version drops support for authentication based on users / password. Instead, a single admin token is used to authenticate every request. + ### To 6.5.0 This version introduces image verification for security purposes. To disable it, set `global.security.allowInsecureImages` to `true`. More details at [GitHub issue](https://github.com/bitnami/charts/issues/30850). diff --git a/bitnami/influxdb/files/conf/README.md b/bitnami/influxdb/files/conf/README.md deleted file mode 100644 index ea46eb8997..0000000000 --- a/bitnami/influxdb/files/conf/README.md +++ /dev/null @@ -1,7 +0,0 @@ -# How to use this folder - -Place your InfluxDB™ configuration file here. These will not be used in case the value *existingConfiguration* is used. - -More information can be found in the link below: - -- [InfluxDB™ Configuration File](https://github.com/bitnami/containers/tree/main/bitnami/influxdb#configuration-file) diff --git a/bitnami/influxdb/files/docker-entrypoint-initdb.d/README.md b/bitnami/influxdb/files/docker-entrypoint-initdb.d/README.md deleted file mode 100644 index 1bb42f866a..0000000000 --- a/bitnami/influxdb/files/docker-entrypoint-initdb.d/README.md +++ /dev/null @@ -1,5 +0,0 @@ -# How to use this folder - -You can copy here your custom `.sh` or `.txt` files so they are executed during the first boot of the image. - -More info in the [influxdb](https://github.com/bitnami/containers/tree/main/bitnami/influxdb#initializing-a-new-instance) container README. diff --git a/bitnami/influxdb/templates/NOTES.txt b/bitnami/influxdb/templates/NOTES.txt index 44d1ba4c85..c6a8452332 100644 --- a/bitnami/influxdb/templates/NOTES.txt +++ b/bitnami/influxdb/templates/NOTES.txt @@ -4,9 +4,23 @@ APP VERSION: {{ .Chart.AppVersion }} Did you know there are enterprise versions of the Bitnami catalog? 
For enhanced secure software supply chain features, unlimited pulls from Docker, LTS support, or application customization, see Bitnami Premium or Tanzu Application Catalog. See https://www.arrow.com/globalecs/na/vendors/bitnami for more information. +{{- if and (not .Values.auth.enabled) (or (contains "NodePort" .Values.service.type) (contains "LoadBalancer" .Values.service.type)) }} +------------------------------------------------------------------------------- + WARNING + + By not enabling "auth.enabled" you have most likely exposed the + InfluxDB(TM) Core service externally without any authentication mechanism. + + For security reasons, we strongly suggest that you enable authentication + setting the "auth.enabled" parameter to "true". + +------------------------------------------------------------------------------- +{{- end }} + ** Please be patient while the chart is being deployed ** {{- if .Values.diagnosticMode.enabled }} + The chart has been deployed in diagnostic mode. All probes have been disabled and the command has been overwritten with: command: {{- include "common.tplvalues.render" (dict "value" .Values.diagnosticMode.command "context" $) | nindent 4 }} @@ -26,122 +40,106 @@ In order to replicate the container startup scripts execute this command: {{- else }} -InfluxDB™ can be accessed through following DNS names from within your cluster: +InfluxDB(TM) Core can be accessed through the following DNS name from within your cluster: - InfluxDB™: {{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} (port {{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }}) - {{- if .Values.metrics.enabled }} - InfluxDB™ Prometheus Metrics: {{ include "common.names.fullname" . }}-metrics.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} (port {{ .Values.metrics.service.port }}) - {{- end }} + {{ include "common.names.fullname" . 
}}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }} (port {{ .Values.service.ports.http }}) -{{- if .Values.authEnabled }} +{{- if and .Values.auth.enabled (ne .Values.objectStore "memory") (or .Values.auth.existingSecret .Values.createAdminTokenJob.enabled) }} -To get the password for the {{ .Values.auth.admin.username }} user, run: +To get the admin token, run: - export ADMIN_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.secretName" . }} -o jsonpath="{.data.admin-user-password}" | base64 -d) + export ADMIN_TOKEN=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.secret.name" . }} -o jsonpath="{.data.{{ include "influxdb.secret.adminTokenKey" . }}}" | base64 -d) -{{- if .Values.auth.user.username }} +{{- else if .Values.auth.enabled }} -To get the password for the {{ .Values.auth.user.username }} user, run: +No admin token was created, you can generate it running: - export USER_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.secretName" . }} -o jsonpath="{.data.user-password}" | base64 -d) + kubectl exec --namespace {{ include "common.names.namespace" . }} deploy/{{ include "common.names.fullname" . }} -- influxdb3 create token --admin --host http{{ if .Values.tls.enabled }}s{{ end }}://{{ include "common.names.fullname" . }}:{{ .Values.service.ports.http }} {{ if .Values.tls.enabled }}--tls-ca /opt/bitnami/influxdb/certs/ca/tls.crt{{ end }} + +Please ensure you note it down! {{- end }} -{{- if .Values.auth.readUser.username }} -To get the password for the {{ .Values.auth.readUser.username }} user, run: +To connect to your database, create a client pod: - export READ_USER_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.secretName" . 
}} -o jsonpath="{.data.read-user-password}" | base64 -d) + kubectl run --namespace {{ include "common.names.namespace" . }} {{ include "common.names.fullname" . }}-client --restart='Never' \ + {{ if .Values.auth.enabled }}--env ADMIN_TOKEN=$ADMIN_TOKEN {{ end }}{{ if and .Values.networkPolicy.enabled (not .Values.networkPolicy.allowExternal) }}--labels="{{ include "common.names.fullname" . }}-client=true" {{ end }}--image {{ include "influxdb.image" . }} \ + --command -- sleep infinity + +{{- if .Values.tls.enabled }} + +Copy your CA TLS certificates to the pod: + + kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.tls.ca.secretName" . }} -o json | jq -r '.data["tls.crt"]' | base64 --decode > /tmp/tls.crt + kubectl cp --namespace {{ include "common.names.namespace" . }} /tmp/tls.crt {{ include "common.names.fullname" . }}-client:/tmp/tls.crt {{- end }} -{{- if .Values.auth.writeUser.username }} -To get the password for the {{ .Values.auth.writeUser.username }} user, run: +Run any desired CLI command: - export WRITE_USER_PASSWORD=$(kubectl get secret --namespace {{ include "common.names.namespace" . }} {{ include "influxdb.secretName" . }} -o jsonpath="{.data.write-user-password}" | base64 -d) + kubectl exec --namespace {{ include "common.names.namespace" . }} {{ include "common.names.fullname" . }}-client \ + -- influxdb3 --host http{{ if .Values.tls.enabled }}s{{ end }}://{{ include "common.names.fullname" . }}:{{ .Values.service.ports.http }} {{ if .Values.auth.enabled }}--token $ADMIN_TOKEN {{ end }}{{ if .Values.tls.enabled }}--tls-ca /tmp/tls.crt{{ end }} -{{- end }} -{{- end }} +{{- if and .Values.networkPolicy.enabled (not .Values.networkPolicy.allowExternal) }} -To connect to your database run the following commands: - - kubectl run {{ include "common.names.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ include "common.names.namespace" . 
}} {{ if .Values.authEnabled }}--env="INFLUX_USERNAME={{ .Values.auth.admin.username }}" --env="INFLUX_PASSWORD=$ADMIN_PASSWORD"{{ end }} \ - {{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ include "common.names.fullname" . }}-client=true" {{ end }}--image {{ include "influxdb.image" . }} \ - --command -- influx -host {{ include "common.names.fullname" . }} -port {{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }} - -{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} - -Note: Since NetworkPolicy is enabled, only pods with label "{{ include "common.names.fullname" . }}-client=true" will be able to connect to InfluxDB™ server(s). +Note: Since NetworkPolicy is enabled, only pods with label "{{ include "common.names.fullname" . }}-client=true" will be able to connect to InfluxDB(TM) Core server(s). {{- end }} To connect to your database from outside the cluster execute the following commands: {{- if .Values.ingress.enabled }} -{{- $ingressHost := (tpl .Values.ingress.hostname .) }} - {{- if .Values.ingress.extraHosts }} - You should be able to access your new InfluxDB™ server(s) through: - {{- end }} - {{- range .Values.ingress.extraHosts }} - {{ if .tls }}https{{- else }}http{{ end }}://{{ .name }} - {{- end }} - e.g.: +1. Get the InfluxDB(TM) Core API URL and associate InfluxDB(TM) Core hostname to your cluster external IP: - {{ if .Values.authEnabled }}INFLUX_USERNAME="{{ .Values.auth.admin.username }}" INFLUX_PASSWORD="$ADMIN_PASSWORD"{{ end }} influx -host {{ $ingressHost }} -port 80 + export CLUSTER_IP=$(minikube ip) # On Minikube. Use: `kubectl cluster-info` on others K8s clusters + echo "InfluxDB(TM) Core API URL: http{{ if .Values.ingress.tls }}s{{ end }}://{{ tpl .Values.ingress.hostname . }}{{ .Values.ingress.path }}" + echo "$CLUSTER_IP {{ tpl .Values.ingress.hostname . 
}}" | sudo tee -a /etc/hosts -{{- else if contains "NodePort" .Values.influxdb.service.type }} +{{- else }} +{{- $port := .Values.service.ports.http | toString }} + +1. Get the InfluxDB(TM) Core API URL by running these commands: + +{{- if contains "NodePort" .Values.service.type }} export NODE_PORT=$(kubectl get --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "common.names.fullname" . }}) export NODE_IP=$(kubectl get nodes --namespace {{ include "common.names.namespace" . }} -o jsonpath="{.items[0].status.addresses[0].address}") - {{- if .Values.authEnabled }}INFLUX_USERNAME="{{ .Values.auth.admin.username }}" INFLUX_PASSWORD="$ADMIN_PASSWORD"{{- end }} influx -host $NODE_IP -port $NODE_PORT + echo "InfluxDB(TM) Core API URL: http{{ if .Values.ingress.tls }}s{{ end }}://$NODE_IP:$NODE_PORT/" -{{- else if contains "LoadBalancer" .Values.influxdb.service.type }} +{{- else if contains "LoadBalancer" .Values.service.type }} NOTE: It may take a few minutes for the LoadBalancer IP to be available. Watch the status with: 'kubectl get --namespace {{ include "common.names.namespace" . }} svc -w {{ include "common.names.fullname" . }}' - export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ include "common.names.fullname" . }} -o jsonpath='{.status.loadBalancer.ingress[0].ip}') - {{- if .Values.authEnabled }}INFLUX_USERNAME="{{ .Values.auth.admin.username }}" INFLUX_PASSWORD="$ADMIN_PASSWORD"{{- end }} influx -host $SERVICE_IP -port {{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }} + export SERVICE_IP=$(kubectl get svc --namespace {{ include "common.names.namespace" . }} {{ include "common.names.fullname" . }} --template "{{ "{{ range (index .status.loadBalancer.ingress 0) }}{{ . 
}}{{ end }}" }}") + echo "InfluxDB(TM) Core API URL: http{{ if .Values.ingress.tls }}s{{ end }}://$SERVICE_IP{{- if ne $port "80" }}:{{ $port }}{{ end }}" -{{- else if contains "ClusterIP" .Values.influxdb.service.type }} +{{- else if contains "ClusterIP" .Values.service.type }} - kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ include "common.names.fullname" . }} 8086:{{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }} & - {{- if .Values.authEnabled }}INFLUX_USERNAME="{{ .Values.auth.admin.username }}" INFLUX_PASSWORD="$ADMIN_PASSWORD"{{- end }} influx -host 127.0.0.1 -port 8086 + kubectl port-forward --namespace {{ include "common.names.namespace" . }} svc/{{ include "common.names.fullname" . }} {{ $port }}:{{ $port }} & + echo "InfluxDB(TM) Core API URL: http{{ if .Values.ingress.tls }}s{{ end }}://127.0.0.1{{- if ne $port "80" }}:{{ $port }}{{ end }}" + +{{- end }} +{{- end }} + +2. Access the InfluxDB(TM) Core API URL using the CLI: + + influxdb3 --host {{ if .Values.auth.enabled }}--token {{ end }}{{ if .Values.tls.enabled }}--tls-ca {{ end }} {{- end }} {{- include "common.warnings.rollingTag" .Values.image }} -{{- include "common.warnings.rollingTag" .Values.backup.uploadProviders.google.image }} -{{- include "common.warnings.rollingTag" .Values.backup.uploadProviders.azure.image }} -{{- include "common.warnings.rollingTag" .Values.volumePermissions.image }} - -{{- $passwordValueConfigs := list -}} -{{- if not .Values.auth.existingSecret -}} - {{- $secretName := include "influxdb.secretName" . 
-}} - - {{- $adminPasswordConfig := dict "valueKey" "auth.admin.password" "secret" $secretName "field" "admin-user-password" "context" $ -}} - {{- $passwordValueConfigs = append $passwordValueConfigs $adminPasswordConfig -}} - {{- $adminTokenConfig := dict "valueKey" "auth.admin.token" "secret" $secretName "field" "admin-user-token" "context" $ -}} - {{- $passwordValueConfigs = append $passwordValueConfigs $adminTokenConfig -}} - - {{- if .Values.auth.user.username }} - {{- $userPasswordConfig := dict "valueKey" "auth.user.password" "secret" $secretName "field" "user-password" "context" $ -}} - {{- $passwordValueConfigs = append $passwordValueConfigs $userPasswordConfig -}} - {{- end }} - {{- if .Values.auth.readUser.username }} - {{- $readUserPasswordConfig := dict "valueKey" "auth.readUser.password" "secret" $secretName "field" "read-user-password" "context" $ -}} - {{- $passwordValueConfigs = append $passwordValueConfigs $readUserPasswordConfig -}} - {{- end }} - {{- if .Values.auth.writeUser.username }} - {{- $writeUserPasswordConfig := dict "valueKey" "auth.writeUser.password" "secret" $secretName "field" "write-user-password" "context" $ -}} - {{- $passwordValueConfigs = append $passwordValueConfigs $writeUserPasswordConfig -}} - {{- end }} -{{- end -}} - -{{- $passwordValidationErrors := include "common.validations.values.multiple.empty" (dict "required" $passwordValueConfigs "context" $) -}} -{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $) -}} - +{{- include "common.warnings.rollingTag" .Values.defaultInitContainers.volumePermissions.image }} +{{- include "common.warnings.rollingTag" .Values.createAdminTokenJob.image }} +{{- $resourcesSections := list "" }} +{{- if .Values.defaultInitContainers.volumePermissions.enabled }} + {{- $resourcesSections = append $resourcesSections "defaultInitContainers.volumePermissions" }} {{- end }} -{{- include "common.warnings.resources" (dict "sections" 
(list "influxdb") "context" $) }} -{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.backup.uploadProviders.google.image .Values.backup.uploadProviders.azure.image .Values.backup.uploadProviders.aws.image) "context" $) }} -{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.backup.uploadProviders.google.image .Values.backup.uploadProviders.azure.image .Values.backup.uploadProviders.aws.image) "context" $) }} +{{- if and .Values.auth.enabled .Values.createAdminTokenJob.enabled (ne .Values.objectStore "memory") }} + {{- $resourcesSections = append $resourcesSections "createAdminTokenJob" }} +{{- end }} +{{- include "common.warnings.resources" (dict "sections" $resourcesSections "context" .) }} +{{- include "common.warnings.modifiedImages" (dict "images" (list .Values.image .Values.defaultInitContainers.volumePermissions.image .Values.createAdminTokenJob.image) "context" .) }} +{{- include "common.errors.insecureImages" (dict "images" (list .Values.image .Values.defaultInitContainers.volumePermissions.image .Values.createAdminTokenJob.image) "context" .) }} +{{- include "influxdb.validateValues" . 
}} diff --git a/bitnami/influxdb/templates/_helpers.tpl b/bitnami/influxdb/templates/_helpers.tpl index 9f2f9f7daf..bc0f9409c8 100644 --- a/bitnami/influxdb/templates/_helpers.tpl +++ b/bitnami/influxdb/templates/_helpers.tpl @@ -5,54 +5,39 @@ SPDX-License-Identifier: APACHE-2.0 {{/* vim: set filetype=mustache: */}} - {{/* -Return the proper InfluxDB™ image name +Return the proper InfluxDB™ Core image name */}} {{- define "influxdb.image" -}} {{ include "common.images.image" (dict "imageRoot" .Values.image "global" .Values.global) }} {{- end -}} {{/* -Return the proper init container volume-permissions image name +Return the proper image name (for the init container volume-permissions image) */}} {{- define "influxdb.volumePermissions.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.volumePermissions.image "global" .Values.global) }} +{{ include "common.images.image" (dict "imageRoot" .Values.defaultInitContainers.volumePermissions.image "global" .Values.global) }} {{- end -}} {{/* -Return the proper gcloud-sdk image name +Return the proper image name (for the "create-admin-token" job image) */}} -{{- define "gcloudSdk.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.backup.uploadProviders.google.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper azure-cli image name -*/}} -{{- define "azureCli.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.backup.uploadProviders.azure.image "global" .Values.global) }} -{{- end -}} - -{{/* -Return the proper aws-cli image name -*/}} -{{- define "awsCli.image" -}} -{{ include "common.images.image" (dict "imageRoot" .Values.backup.uploadProviders.aws.image "global" .Values.global) }} +{{- define "influxdb.createAdminToken.image" -}} +{{ include "common.images.image" (dict "imageRoot" .Values.createAdminTokenJob.image "global" .Values.global) }} {{- end -}} {{/* Return the proper Docker Image Registry Secret Names */}} {{- define 
"influxdb.imagePullSecrets" -}} -{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.volumePermissions.image .Values.backup.uploadProviders.google.image .Values.backup.uploadProviders.azure.image) "global" .Values.global) }} +{{ include "common.images.pullSecrets" (dict "images" (list .Values.image .Values.defaultInitContainers.volumePermissions.image .Values.createAdminTokenJob.image) "global" .Values.global) }} {{- end -}} {{/* -Create the name of the service account to use +Create the name of the ServiceAccount to use */}} {{- define "influxdb.serviceAccountName" -}} -{{- if or .Values.serviceAccount.enabled .Values.serviceAccount.create -}} +{{- if .Values.serviceAccount.create -}} {{ default (include "common.names.fullname" .) .Values.serviceAccount.name }} {{- else -}} {{ default "default" .Values.serviceAccount.name }} @@ -60,74 +45,173 @@ Create the name of the service account to use {{- end -}} {{/* -Return the InfluxDB™ credentials secret. +Create the name of the ServiceAccount to use on "create-admin-token" job pods */}} -{{- define "influxdb.secretName" -}} +{{- define "influxdb.createAdminTokenJob.serviceAccountName" -}} +{{- if .Values.createAdminTokenJob.serviceAccount.create -}} + {{ default (printf "%s-create-admin-token" (include "common.names.fullname" .)) .Values.createAdminTokenJob.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.createAdminTokenJob.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the ServiceAccount to use on "delete-admin-token" job pods +*/}} +{{- define "influxdb.deleteAdminTokenJob.serviceAccountName" -}} +{{- if .Values.createAdminTokenJob.serviceAccount.create -}} + {{ default (printf "%s-delete-admin-token" (include "common.names.fullname" .)) .Values.createAdminTokenJob.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.createAdminTokenJob.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the InfluxDB™ Core 
secret name +*/}} +{{- define "influxdb.secret.name" -}} {{- if .Values.auth.existingSecret -}} - {{- printf "%s" (tpl .Values.auth.existingSecret $) -}} -{{- else -}} - {{- printf "%s" (include "common.names.fullname" .) -}} + {{- tpl .Values.auth.existingSecret . -}} +{{- else }} + {{- include "common.names.fullname" . -}} {{- end -}} {{- end -}} {{/* -Return the InfluxDB™ backup S3 secret. +Return the secret key that contains the InfluxDB™ Core admin token */}} -{{- define "influxdb.backup.secretName" -}} -{{- if .Values.backup.uploadProviders.aws.existingSecret -}} - {{- printf "%s" (tpl .Values.backup.uploadProviders.aws.existingSecret $) -}} +{{- define "influxdb.secret.adminTokenKey" -}} +{{- if and .Values.auth.existingSecret .Values.auth.existingSecretAdminTokenKey -}} + {{- tpl .Values.auth.existingSecretAdminTokenKey . -}} {{- else -}} - {{- printf "%s-backup-aws" (include "common.names.fullname" .) -}} + {{- print "admin-token" -}} {{- end -}} {{- end -}} {{/* -Return the InfluxDB™ configuration configmap. +Get the InfluxDB™ Core Store secret name */}} -{{- define "influxdb.configmapName" -}} -{{- if .Values.influxdb.existingConfiguration -}} - {{- printf "%s" (tpl .Values.influxdb.existingConfiguration $) -}} -{{- else -}} - {{- printf "%s" (include "common.names.fullname" .) -}} +{{- define "influxdb.store.secret.name" -}} +{{- if eq .Values.objectStore "s3" }} + {{- if .Values.s3.auth.existingSecret -}} + {{- tpl .Values.s3.auth.existingSecret . -}} + {{- else }} + {{- printf "%s-s3" (include "common.names.fullname" .) -}} + {{- end -}} +{{- else if eq .Values.objectStore "google" }} + {{- if .Values.google.auth.existingSecret -}} + {{- tpl .Values.google.auth.existingSecret . -}} + {{- else }} + {{- printf "%s-google" (include "common.names.fullname" .) -}} + {{- end -}} +{{- else if eq .Values.objectStore "azure" }} + {{- if .Values.azure.auth.existingSecret -}} + {{- tpl .Values.azure.auth.existingSecret . 
-}} + {{- else }} + {{- printf "%s-azure" (include "common.names.fullname" .) -}} + {{- end -}} {{- end -}} {{- end -}} {{/* -Return the InfluxDB™ PVC name. +Returns true if a secret should be created for InfluxDB™ Core Store credentials */}} -{{- define "influxdb.claimName" -}} -{{- if .Values.persistence.existingClaim }} - {{- printf "%s" (tpl .Values.persistence.existingClaim $) -}} -{{- else -}} - {{- printf "%s" (include "common.names.fullname" .) -}} +{{- define "influxdb.store.secret.create" -}} +{{- if or (and (eq .Values.objectStore "s3") (not .Values.s3.auth.existingSecret)) (and (eq .Values.objectStore "google") (not .Values.google.auth.existingSecret)) (and (eq .Values.objectStore "azure") (not .Values.azure.auth.existingSecret)) }} +true {{- end -}} {{- end -}} {{/* -Return the InfluxDB™ backup PVC name. +Return the name of the secret containing the CA TLS certificate */}} -{{- define "influxdb.backup.claimName" -}} -{{- if and .Values.backup.persistence.ownConfig .Values.backup.persistence.existingClaim }} - {{- printf "%s" (tpl .Values.backup.persistence.existingClaim $) -}} +{{- define "influxdb.tls.ca.secretName" -}} +{{- if or .Values.tls.autoGenerated.enabled (and (not (empty .Values.tls.ca))) -}} + {{- printf "%s-ca-crt" (include "common.names.fullname" .) -}} {{- else -}} - {{- printf "%s-backups" (include "common.names.fullname" .) -}} + {{- required "An existing secret name must be provided with a CA cert for InfluxDB(TM) Core if cert is not provided!" (tpl .Values.tls.existingCASecret .) -}} {{- end -}} {{- end -}} {{/* -Return the InfluxDB™ initialization scripts configmap. +Return the name of the secret containing the TLS certificates for InfluxDB™ Core servers +*/}} +{{- define "influxdb.tls.server.secretName" -}} +{{- if or .Values.tls.autoGenerated.enabled (and (not (empty .Values.tls.server.cert)) (not (empty .Values.tls.server.key))) -}} + {{- printf "%s-crt" (include "common.names.fullname" .) 
-}} +{{- else -}} + {{- required "An existing secret name must be provided with TLS certs for InfluxDB(TM) Core servers if cert and key are not provided!" (tpl .Values.tls.server.existingSecret .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return the InfluxDB™ Core initialization scripts ConfigMap name. */}} {{- define "influxdb.initdbScriptsConfigmapName" -}} -{{- if .Values.influxdb.initdbScriptsCM -}} - {{- printf "%s" (tpl .Values.influxdb.initdbScriptsCM $) -}} +{{- if .Values.initdbScriptsCM -}} + {{- print (tpl .Values.initdbScriptsCM .) -}} {{- else -}} {{- printf "%s-initdb-scripts" (include "common.names.fullname" .) -}} {{- end -}} {{- end -}} {{/* -Get the InfluxDB™ initialization scripts secret. +Return the InfluxDB™ Core initialization scripts Secret name */}} {{- define "influxdb.initdbScriptsSecret" -}} -{{- printf "%s" (tpl .Values.influxdb.initdbScriptsSecret $) -}} +{{- print (tpl .Values.initdbScriptsSecret .) -}} +{{- end -}} + +{{/* +Compile all warnings into a single message. +*/}} +{{- define "influxdb.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "influxdb.validateValues.replicaCount" .) -}} +{{- $messages := append $messages (include "influxdb.validateValues.auth.existingSecret" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of InfluxDB™ Core - replicaCount +*/}} +{{- define "influxdb.validateValues.replicaCount" -}} +{{- if and (or (eq .Values.objectStore "file") (eq .Values.objectStore "memory")) (or .Values.autoscaling.hpa.enabled (gt (int .Values.replicaCount) 1)) }} +replicaCount: + Running multiple InfluxDB(TM) Core replicas is not supported when using + the file or memory object store. Please ensure you run a single replica + and HPA is disabled (--set replicaCount=1,autoscaling.hpa.enabled=false). 
+{{- end -}} +{{- end -}} + +{{/* +Validate values of InfluxDB™ Core - auth.existingSecret +*/}} +{{- define "influxdb.validateValues.auth.existingSecret" -}} +{{- if and .Values.auth.enabled .Values.auth.existingSecret }} +{{- if .Values.createAdminTokenJob.enabled }} +auth.existingSecret: + Consuming the admin token from a secret is incompatible with running + a K8s job to create it. Please disable the job (--set createAdminTokenJob.enabled=false) + or unset the existingSecret value (--set auth.existingSecret=""). +{{- end -}} +{{- if eq .Values.objectStore "memory" }} +auth.existingSecret: + Consuming the admin token from a secret is incompatible with using + the memory object store given there's no existing data. + Please ensure you unset the existingSecret value (--set auth.existingSecret=""). +{{- else if and (eq .Values.objectStore "file") (or (not .Values.persistence.enabled) (not .Values.persistence.existingClaim)) }} +auth.existingSecret: + Consuming the admin token from a secret is incompatible with using + the file object store with no previously existing data. Please ensure + you set the PVC name with your existing data (--set persistence.enabled=true + --set persistence.existingClaim=) or unset the existingSecret value + (--set auth.existingSecret=""). +{{- end -}} +{{- end -}} {{- end -}} diff --git a/bitnami/influxdb/templates/_init_containers.tpl b/bitnami/influxdb/templates/_init_containers.tpl new file mode 100644 index 0000000000..30af529d90 --- /dev/null +++ b/bitnami/influxdb/templates/_init_containers.tpl @@ -0,0 +1,37 @@ +{{/* +Copyright Broadcom, Inc. All Rights Reserved. 
+SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{/* vim: set filetype=mustache: */}} + +{{/* +Returns an init-container that changes the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node +*/}} +{{- define "influxdb.defaultInitContainers.volumePermissions" -}} +- name: volume-permissions + image: {{ include "influxdb.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.defaultInitContainers.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.defaultInitContainers.volumePermissions.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.defaultInitContainers.volumePermissions.containerSecurityContext "context" .) | nindent 4 }} + {{- end }} + {{- if .Values.defaultInitContainers.volumePermissions.resources }} + resources: {{- toYaml .Values.defaultInitContainers.volumePermissions.resources | nindent 4 }} + {{- else if ne .Values.defaultInitContainers.volumePermissions.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.defaultInitContainers.volumePermissions.resourcesPreset) | nindent 4 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p {{ .Values.persistence.mountPath }} + {{- if eq ( toString ( .Values.defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser )) "auto" }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R $(id -u):$(id -G | cut -d " " -f2) + {{- else }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" | xargs -r chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} +{{- end -}} diff --git a/bitnami/influxdb/templates/certs.yaml 
b/bitnami/influxdb/templates/certs.yaml new file mode 100644 index 0000000000..409aba0c1e --- /dev/null +++ b/bitnami/influxdb/templates/certs.yaml @@ -0,0 +1,87 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.tls.enabled .Values.tls.autoGenerated.enabled (eq .Values.tls.autoGenerated.engine "cert-manager") }} +{{- if empty .Values.tls.autoGenerated.certManager.existingIssuer }} +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-clusterissuer" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" . ) | nindent 4 }} + app.kubernetes.io/part-of: influxdb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" . ) | nindent 4 }} + {{- end }} +spec: + selfSigned: {} +--- +{{- end }} +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-ca-crt" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" . ) | nindent 4 }} + app.kubernetes.io/part-of: influxdb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" . ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ template "influxdb.tls.ca.secretName" . }} + commonName: {{ printf "%s-root-ca" (include "common.names.fullname" .) 
}} + isCA: true + issuerRef: + name: {{ default (printf "%s-clusterissuer" (include "common.names.fullname" .)) .Values.tls.autoGenerated.certManager.existingIssuer }} + kind: {{ default "Issuer" .Values.tls.autoGenerated.certManager.existingIssuerKind }} +--- +apiVersion: cert-manager.io/v1 +kind: Issuer +metadata: + name: {{ printf "%s-ca-issuer" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" . ) | nindent 4 }} + app.kubernetes.io/part-of: influxdb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" . ) | nindent 4 }} + {{- end }} +spec: + ca: + secretName: {{ template "influxdb.tls.ca.secretName" . }} +--- +apiVersion: cert-manager.io/v1 +kind: Certificate +metadata: + name: {{ printf "%s-crt" (include "common.names.fullname" .) }} + namespace: {{ include "common.names.namespace" . | quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" . ) | nindent 4 }} + app.kubernetes.io/part-of: influxdb + app.kubernetes.io/component: influxdb + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" . ) | nindent 4 }} + {{- end }} +spec: + secretName: {{ template "influxdb.tls.server.secretName" . }} + commonName: {{ printf "%s.%s.svc.%s" (include "common.names.fullname" .) (include "common.names.namespace" .) .Values.clusterDomain }} + issuerRef: + name: {{ printf "%s-ca-issuer" (include "common.names.fullname" .) }} + kind: Issuer + subject: + organizations: + - "InfluxDB" + dnsNames: + - '*.{{ include "common.names.namespace" . }}' + - '*.{{ include "common.names.namespace" . }}.svc' + - '*.{{ include "common.names.namespace" . 
}}.svc.{{ .Values.clusterDomain }}' + - '*.{{ include "common.names.fullname" . }}' + - '*.{{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . }}' + - '*.{{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc' + - '*.{{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . }}.svc.{{ .Values.clusterDomain }}' + privateKey: + algorithm: {{ .Values.tls.autoGenerated.certManager.keyAlgorithm }} + size: {{ int .Values.tls.autoGenerated.certManager.keySize }} + duration: {{ .Values.tls.autoGenerated.certManager.duration }} + renewBefore: {{ .Values.tls.autoGenerated.certManager.renewBefore }} +{{- end }} diff --git a/bitnami/influxdb/templates/configmap-backup.yaml b/bitnami/influxdb/templates/configmap-backup.yaml deleted file mode 100644 index d35d3b24f1..0000000000 --- a/bitnami/influxdb/templates/configmap-backup.yaml +++ /dev/null @@ -1,72 +0,0 @@ -{{- /* -Copyright Broadcom, Inc. All Rights Reserved. -SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if .Values.backup.enabled }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "common.names.fullname" . }}-backup - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: influxdb - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - backup.sh: |- - #!/bin/bash - - set -e - - . /opt/bitnami/scripts/libinfluxdb.sh - - DATE="$(date +%Y%m%d_%H%M%S)" - - host="{{ include "common.names.fullname" . }}.{{ include "common.names.namespace" . 
}}.svc" - - export INFLUX_TOKEN={{ ternary "$(cat \"${INFLUXDB_ADMIN_USER_TOKEN_FILE}\")" "${INFLUXDB_ADMIN_USER_TOKEN}" .Values.auth.usePasswordFiles }} - - get_orgs() { - influx org list --host "http://${host}:{{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }}" 2> /dev/null | grep -v 'ID' | awk -F '\t' 'BEGIN{ORS=" "} {print $2}' - } - - get_databases() { - local org_name="${1:-}" - influx bucket list --host "http://${host}:{{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }}" --org "${org_name}" 2> /dev/null | grep -v 'ID' | awk -F '\t' 'BEGIN{ORS=" "} {print $2}' - } - - for ORG in $(get_orgs); do - echo "finding buckets in org ${ORG}" - for BUCKET in $(get_databases "${ORG}"); do - backup_dir="{{ .Values.backup.directory }}/${ORG}/${BUCKET}" - echo "backuping ${BUCKET} bucket to ${backup_dir}" - mkdir -p "${backup_dir}" - - influx backup --host "http://${host}:{{ coalesce .Values.influxdb.service.ports.http .Values.influxdb.service.port }}" --bucket "${BUCKET}" "${backup_dir}/${DATE}" - done - done - - echo "deleting old backups" - find {{ .Values.backup.directory }} -mindepth 3 -maxdepth 3 -not -name ".snapshot" -not -name "lost+found" -type d -mtime +{{ .Values.backup.retentionDays }} -exec rm -r {} \; - upload-google.sh: |- - #!/bin/sh - - set -e - - gcloud auth activate-service-account --key-file /var/secrets/google/{{ .Values.backup.uploadProviders.google.secretKey }} - gcloud storage rsync -r {{ .Values.backup.directory }}/ {{ .Values.backup.uploadProviders.google.bucketName }} --delete-unmatched-destination-objects - upload-azure.sh: |- - #!/bin/sh - - set -e - - az storage blob sync --source {{ .Values.backup.directory }} --container {{ .Values.backup.uploadProviders.azure.containerName }} - upload-aws.sh: |- - #!/bin/sh - - set -e - - aws s3 sync {{ .Values.backup.directory }} {{ .Values.backup.uploadProviders.aws.bucketName }} -{{ end }} diff --git 
a/bitnami/influxdb/templates/configmap-initdb-scripts.yaml b/bitnami/influxdb/templates/configmap-initdb-scripts.yaml index f7f74fc14c..192a9a6a81 100644 --- a/bitnami/influxdb/templates/configmap-initdb-scripts.yaml +++ b/bitnami/influxdb/templates/configmap-initdb-scripts.yaml @@ -3,17 +3,18 @@ Copyright Broadcom, Inc. All Rights Reserved. SPDX-License-Identifier: APACHE-2.0 */}} -{{- if and (.Values.influxdb.initdbScripts) (not .Values.influxdb.initdbScriptsCM) }} +{{- if and .Values.initdbScripts (not .Values.initdbScriptsCM) }} apiVersion: v1 kind: ConfigMap metadata: - name: {{ include "common.names.fullname" . }}-initdb-scripts + name: {{ printf "%s-initdb-scripts" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + labels: {{- include "common.labels.standard" (dict "customLabels" .Values.commonLabels "context" .) | nindent 4 }} + app.kubernetes.io/part-of: influxdb app.kubernetes.io/component: influxdb {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.commonAnnotations "context" .) | nindent 4 }} {{- end }} data: - {{ include "common.tplvalues.render" (dict "value" .Values.influxdb.initdbScripts "context" $) | nindent 2 }} + {{ include "common.tplvalues.render" (dict "value" .Values.initdbScripts "context" .) | nindent 2 }} {{- end }} diff --git a/bitnami/influxdb/templates/configmap.yaml b/bitnami/influxdb/templates/configmap.yaml deleted file mode 100644 index 4c40b47896..0000000000 --- a/bitnami/influxdb/templates/configmap.yaml +++ /dev/null @@ -1,21 +0,0 @@ -{{- /* -Copyright Broadcom, Inc. All Rights Reserved. 
-SPDX-License-Identifier: APACHE-2.0 -*/}} - -{{- if and .Values.influxdb.configuration (not .Values.influxdb.existingConfiguration) }} -apiVersion: v1 -kind: ConfigMap -metadata: - name: {{ include "common.names.fullname" . }} - namespace: {{ include "common.names.namespace" . | quote }} - labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} - app.kubernetes.io/component: influxdb - {{- if .Values.commonAnnotations }} - annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} - {{- end }} -data: - config.yaml: |- - # User-supplied configuration: - {{ include "common.tplvalues.render" (dict "value" .Values.influxdb.configuration "context" $) | nindent 4 }} -{{- end }} diff --git a/bitnami/influxdb/templates/create-admin-token-job.yaml b/bitnami/influxdb/templates/create-admin-token-job.yaml new file mode 100644 index 0000000000..9167da9e56 --- /dev/null +++ b/bitnami/influxdb/templates/create-admin-token-job.yaml @@ -0,0 +1,223 @@ +{{- /* +Copyright Broadcom, Inc. All Rights Reserved. +SPDX-License-Identifier: APACHE-2.0 +*/}} + +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) .Values.createAdminTokenJob.enabled (ne .Values.objectStore "memory") }} +apiVersion: batch/v1 +kind: Job +metadata: + name: {{ printf "%s-create-admin-token" (include "common.names.fullname" .) | trunc 63 | trimSuffix "-" }} + namespace: {{ include "common.names.namespace" . 
| quote }} + labels: {{- include "common.labels.standard" ( dict "customLabels" .Values.commonLabels "context" $ ) | nindent 4 }} + app.kubernetes.io/part-of: influxdb + app.kubernetes.io/component: create-admin-token + {{- $defaultAnnotations := dict "helm.sh/hook" "pre-install" "helm.sh/hook-delete-policy" "before-hook-creation,hook-succeeded" }} + {{- $annotations := include "common.tplvalues.merge" (dict "values" (list .Values.createAdminTokenJob.annotations .Values.commonAnnotations $defaultAnnotations) "context" .) }} + annotations: {{- include "common.tplvalues.render" (dict "value" $annotations "context" .) | nindent 4 }} +spec: + backoffLimit: {{ .Values.createAdminTokenJob.backoffLimit }} + template: + metadata: + {{- $podLabels := include "common.tplvalues.merge" ( dict "values" ( list .Values.createAdminTokenJob.podLabels .Values.commonLabels ) "context" . ) }} + labels: {{- include "common.labels.standard" ( dict "customLabels" $podLabels "context" $ ) | nindent 8 }} + app.kubernetes.io/part-of: influxdb + app.kubernetes.io/component: create-admin-token + {{- if .Values.createAdminTokenJob.podAnnotations }} + annotations: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- include "influxdb.imagePullSecrets" . | nindent 6 }} + restartPolicy: OnFailure + automountServiceAccountToken: {{ .Values.createAdminTokenJob.automountServiceAccountToken }} + serviceAccountName: {{ include "influxdb.createAdminTokenJob.serviceAccountName" . 
}} + {{- if .Values.createAdminTokenJob.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.createAdminTokenJob.affinity }} + affinity: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.affinity "context" $) | nindent 8 }} + {{- else }} + affinity: + podAffinity: {{- include "common.affinities.pods" (dict "type" .Values.createAdminTokenJob.podAffinityPreset "component" "create-admin-token" "customLabels" $podLabels "topologyKey" .Values.createAdminTokenJob.topologyKey "context" $) | nindent 10 }} + podAntiAffinity: {{- include "common.affinities.pods" (dict "type" .Values.createAdminTokenJob.podAntiAffinityPreset "component" "create-admin-token" "customLabels" $podLabels "topologyKey" .Values.createAdminTokenJob.topologyKey "context" $) | nindent 10 }} + nodeAffinity: {{- include "common.affinities.nodes" (dict "type" .Values.createAdminTokenJob.nodeAffinityPreset.type "key" .Values.createAdminTokenJob.nodeAffinityPreset.key "values" .Values.createAdminTokenJob.nodeAffinityPreset.values) | nindent 10 }} + {{- end }} + {{- if .Values.createAdminTokenJob.nodeSelector }} + nodeSelector: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.createAdminTokenJob.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.createAdminTokenJob.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.createAdminTokenJob.tolerations }} + tolerations: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.createAdminTokenJob.topologySpreadConstraints }} + topologySpreadConstraints: {{- include "common.tplvalues.render" (dict "value" .Values.createAdminTokenJob.topologySpreadConstraints "context" .) 
| nindent 8 }} + {{- end }} + {{- if .Values.createAdminTokenJob.priorityClassName }} + priorityClassName: {{ .Values.createAdminTokenJob.priorityClassName | quote }} + {{- end }} + {{- if .Values.createAdminTokenJob.schedulerName }} + schedulerName: {{ .Values.createAdminTokenJob.schedulerName }} + {{- end }} + {{- if .Values.podSecurityContext.enabled }} + # We use the same security context as InfluxDB pods to avoid permission issues on shared PVC + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.podSecurityContext "context" .) | nindent 8 }} + {{- end }} + initContainers: + {{- if and .Values.defaultInitContainers.volumePermissions.enabled .Values.persistence.enabled (eq .Values.objectStore "file") }} + {{- include "influxdb.defaultInitContainers.volumePermissions" . | nindent 8 }} + {{- end }} + - name: influxdb + image: {{ include "influxdb.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + {{- if .Values.containerSecurityContext.enabled }} + # We use the same security context as InfluxDB containers to avoid permission issues on shared PVC + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.containerSecurityContext "context" .) | nindent 12 }} + {{- end }} + # We also use the same resources as InfluxDB to ensure we can start InfluxDB in background + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- else if ne .Values.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + . /opt/bitnami/scripts/influxdb-env.sh + . 
/opt/bitnami/scripts/libinfluxdb.sh + + trap "influxdb_stop" EXIT + influxdb_start_bg + influxdb3_create_admin_token + chmod 444 $INFLUXDB_AUTOGEN_ADMIN_TOKEN_FILE + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" (or .Values.image.debug .Values.diagnosticMode.enabled) | quote }} + - name: INFLUXDB_OBJECT_STORE + value: {{ .Values.objectStore | quote }} + - name: INFLUXDB_DATA_DIR + value: {{ printf "%s/data" .Values.persistence.mountPath | quote }} + - name: INFLUXDB_NODE_ID + value: {{ .Values.nodeId | quote }} + - name: INFLUXDB_AUTOGEN_ADMIN_TOKEN_FILE + value: "/shared/token" + {{- if or (eq .Values.objectStore "s3") (eq .Values.objectStore "google") (eq .Values.objectStore "azure") }} + - name: INFLUXDB3_BUCKET + value: {{ .Values.bucket | quote }} + {{- end }} + {{- if eq .Values.objectStore "s3" }} + - name: AWS_DEFAULT_REGION + value: {{ .Values.s3.defaultRegion | quote }} + {{- if .Values.s3.endpoint }} + - name: AWS_ENDPOINT + value: {{ .Values.s3.endpoint | quote }} + {{- end }} + {{- if .Values.usePasswordFiles }} + - name: AWS_ACCESS_KEY_ID_FILE + value: "/opt/bitnami/influxdb/secrets/s3-access-key-id" + - name: AWS_SECRET_ACCESS_KEY_FILE + value: "/opt/bitnami/influxdb/secrets/s3-secret-access-key" + {{- else }} + - name: AWS_ACCESS_KEY_ID + valueFrom: + secretKeyRef: + name: {{ template "influxdb.store.secret.name" . }} + key: s3-access-key-id + - name: AWS_SECRET_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "influxdb.store.secret.name" . }} + key: s3-secret-access-key + {{- end }} + {{- else if eq .Values.objectStore "google" }} + {{- if .Values.usePasswordFiles }} + - name: GOOGLE_SERVICE_ACCOUNT_FILE + value: "/opt/bitnami/influxdb/secrets/google-service-account-key" + {{- else }} + - name: GOOGLE_SERVICE_ACCOUNT + valueFrom: + secretKeyRef: + name: {{ template "influxdb.store.secret.name" . 
}} + key: google-service-account-key + {{- end }} + {{- else if eq .Values.objectStore "azure" }} + - name: AZURE_STORAGE_ACCOUNT + value: {{ .Values.azure.account | quote }} + {{- if .Values.usePasswordFiles }} + - name: AZURE_STORAGE_ACCESS_KEY_FILE + value: "/opt/bitnami/influxdb/secrets/azure-storage-access-key" + {{- else }} + - name: AZURE_STORAGE_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ template "influxdb.store.secret.name" . }} + key: azure-storage-access-key + {{- end }} + {{- end }} + volumeMounts: + - name: empty-dir + mountPath: /shared + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- if and (or (eq .Values.objectStore "s3") (eq .Values.objectStore "google") (eq .Values.objectStore "azure")) .Values.usePasswordFiles }} + - name: store-secret + mountPath: /opt/bitnami/influxdb/secrets + readOnly: true + {{- end }} + containers: + - name: kubectl + image: {{ include "influxdb.createAdminToken.image" . }} + imagePullPolicy: {{ .Values.createAdminTokenJob.image.pullPolicy }} + {{- if .Values.createAdminTokenJob.containerSecurityContext.enabled }} + securityContext: {{- include "common.compatibility.renderSecurityContext" (dict "secContext" .Values.createAdminTokenJob.containerSecurityContext "context" .) 
| nindent 12 }} + {{- end }} + {{- if .Values.createAdminTokenJob.resources }} + resources: {{- toYaml .Values.createAdminTokenJob.resources | nindent 12 }} + {{- else if ne .Values.createAdminTokenJob.resourcesPreset "none" }} + resources: {{- include "common.resources.preset" (dict "type" .Values.createAdminTokenJob.resourcesPreset) | nindent 12 }} + {{- end }} + command: + - /bin/bash + args: + - -ec + - | + encoded_token="$(tr -d '\n' < /shared/token | base64 -w 0)" + cat < + ## + nodePorts: + http: "" + ## @param service.sessionAffinity Control where client requests go, to the same pod or round-robin + ## Values: ClientIP or None + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/ + ## + sessionAffinity: None + ## @param service.sessionAffinityConfig Additional settings for the sessionAffinity + ## sessionAffinityConfig: + ## clientIP: + ## timeoutSeconds: 300 + ## + sessionAffinityConfig: {} + ## @param service.clusterIP InfluxDB(TM) Core service Cluster IP + ## e.g.: + ## clusterIP: None + ## + clusterIP: "" + ## @param service.loadBalancerIP InfluxDB(TM) Core service Load Balancer IP + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + loadBalancerIP: "" + ## @param service.loadBalancerSourceRanges InfluxDB(TM) service Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## e.g: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## @param service.externalTrafficPolicy InfluxDB(TM) Core service external traffic policy + ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + ## @param service.extraPorts Extra port to expose on InfluxDB(TM) Core service + ## + extraPorts: [] + ## @param service.annotations Additional 
custom annotations for InfluxDB(TM) Core service + ## + annotations: {} +## InfluxDB(TM) Core ingress parameters ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/ ## ingress: - ## @param ingress.enabled Enable ingress controller resource + ## @param ingress.enabled Enable ingress record generation for InfluxDB(TM) Core ## enabled: false - ## @param ingress.tls Create TLS Secret - ## TLS certificates will be retrieved from a TLS secret with name: {{- printf "%s-tls" (tpl .Values.ingress.hostname .) }} - ## You can use the ingress.secrets parameter to create this TLS secret on cert-manager to create it + ## @param ingress.ingressClassName IngressClass that will be used to implement the Ingress (Kubernetes 1.18+) + ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . + ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ ## - tls: false - ## DEPRECATED: Use ingress.annotations instead of ingress.certManager - ## certManager: false - ## - + ingressClassName: "" ## @param ingress.pathType Ingress path type ## pathType: ImplementationSpecific ## @param ingress.apiVersion Force Ingress API version (automatically detected if not set) ## apiVersion: "" - ## @param ingress.hostname Default host for the ingress resource (evaluated as template) + ## @param ingress.hostname Default host for the ingress record ## hostname: influxdb.local - ## @param ingress.path Ingress path*' in order to use this - ## with ALB ingress controllers. + ## @param ingress.path Default path for the ingress record + ## NOTE: You may need to set this to '/*' in order to use this with ALB ingress controllers ## path: / - ## @param ingress.annotations Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. 
- ## For a full list of possible ingress annotations, please see - ## ref: https://github.com/kubernetes/ingress-nginx/blob/main/docs/user-guide/nginx-configuration/annotations.md + ## @param ingress.annotations [object] Additional annotations for the Ingress resource. To enable certificate autogeneration, place here your cert-manager annotations. ## Use this parameter to set the required annotations for cert-manager, see ## ref: https://cert-manager.io/docs/usage/ingress/#supported-annotations - ## ## e.g: ## annotations: ## kubernetes.io/ingress.class: nginx ## cert-manager.io/cluster-issuer: cluster-issuer-name ## annotations: {} - ## @param ingress.extraHosts The list of additional hostnames to be covered with this ingress record. - ## Most likely the hostname above will be enough, but in the event more hosts are needed, this is an array + ## @param ingress.tls Enable TLS configuration for the host defined at `ingress.hostname` parameter + ## TLS certificates will be retrieved from a TLS secret with name: `{{- printf "%s-tls" .Values.ingress.hostname }}` + ## You can: + ## - Use the `ingress.secrets` parameter to create this TLS secret + ## - Rely on cert-manager to create it by setting the corresponding annotations + ## - Rely on Helm to create self-signed certificates by setting `ingress.selfSigned=true` + ## + tls: false + ## @param ingress.selfSigned Create a TLS secret for this ingress record using self-signed certificates generated by Helm + ## + selfSigned: false + ## @param ingress.extraHosts An array with additional hostname(s) to be covered with the ingress record + ## e.g: ## extraHosts: - ## - name: influxdb.local - ## path: / + ## - name: influxdb.local + ## path: / ## extraHosts: [] - ## @param ingress.extraPaths Additional arbitrary path/backend objects - ## For example: The ALB ingress controller requires a special rule for handling SSL redirection. 
+ ## @param ingress.extraPaths An array with additional arbitrary paths that may need to be added to the ingress under the main host + ## e.g: ## extraPaths: ## - path: /* ## backend: @@ -635,33 +624,35 @@ ingress: ## servicePort: use-annotation ## extraPaths: [] - ## @param ingress.extraTls The tls configuration for additional hostnames to be covered with this ingress record. - ## see: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## @param ingress.extraTls TLS configuration for additional hostname(s) to be covered with this ingress record + ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#tls + ## e.g: ## extraTls: ## - hosts: ## - influxdb.local ## secretName: influxdb.local-tls ## extraTls: [] - ## @param ingress.secrets If you're providing your own certificates, please use this to add the certificates as secrets - ## key and certificate should start with -----BEGIN CERTIFICATE----- or - ## -----BEGIN RSA PRIVATE KEY----- - ## - ## name should line up with a tlsSecret set further up - ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set - ## + ## @param ingress.secrets Custom TLS certificates as secrets + ## NOTE: 'key' and 'certificate' are expected in PEM format + ## NOTE: 'name' should line up with a 'secretName' set further up + ## If it is not set and you're using cert-manager, this is unneeded, as it will create a secret for you with valid certificates + ## If it is not set and you're NOT using cert-manager either, self-signed certificates will be created valid for 365 days ## It is also possible to create and manage the certificates outside of this helm chart ## Please see README.md for more information - ## - name: influxdb.local-tls - ## key: - ## certificate: + ## e.g: + ## secrets: + ## - name: influxdb.local-tls + ## key: |- + ## -----BEGIN RSA PRIVATE KEY----- + ## ... 
+ ## -----END RSA PRIVATE KEY----- + ## certificate: |- + ## -----BEGIN CERTIFICATE----- + ## ... + ## -----END CERTIFICATE----- ## secrets: [] - ## @param ingress.ingressClassName IngressClass that will be be used to implement the Ingress (Kubernetes 1.18+) - ## This is supported in Kubernetes 1.18+ and required if you have more than one IngressClass marked as the default for your cluster . - ## ref: https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/ - ## - ingressClassName: "" ## @param ingress.extraRules Additional rules to be covered with this ingress record ## ref: https://kubernetes.io/docs/concepts/services-networking/ingress/#ingress-rules ## e.g: @@ -676,107 +667,6 @@ ingress: ## name: http ## extraRules: [] -## @section Metrics parameters - -## Prometheus metrics -## ref: https://docs.influxdata.com/influxdb/v1.7/administration/server_monitoring/#influxdb-metrics-http-endpoint -## -metrics: - ## @param metrics.enabled Enable the export of Prometheus metrics - ## - enabled: false - service: - ## @param metrics.service.type Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) - ## - type: ClusterIP - ## @param metrics.service.port InfluxDB(TM) Prometheus port - ## - port: 9122 - ## @param metrics.service.nodePort Kubernetes HTTP node port - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport - ## - nodePort: "" - ## @param metrics.service.loadBalancerIP loadBalancerIP if service type is `LoadBalancer` - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer - ## - loadBalancerIP: "" - ## @param metrics.service.loadBalancerSourceRanges Address that are allowed when service is LoadBalancer - ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service - ## - ## loadBalancerSourceRanges: - ## - 10.10.10.0/24 - loadBalancerSourceRanges: [] - ## @param 
metrics.service.clusterIP Static clusterIP or None for headless services - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address - ## e.g: - ## clusterIP: None - ## - clusterIP: "" - ## @param metrics.service.annotations [object] Annotations for the Prometheus metrics service - ## - annotations: - prometheus.io/scrape: "true" - prometheus.io/port: "{{ .Values.metrics.service.port }}" - prometheus.io/path: "/metrics" - ## @param metrics.service.externalTrafficPolicy Service external traffic policy - ## ref http://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip - ## - externalTrafficPolicy: Cluster - ## @param metrics.service.extraPorts Extra ports to expose (normally used with the `sidecar` value) - ## - extraPorts: [] - ## @param metrics.service.sessionAffinity Session Affinity for Kubernetes service, can be "None" or "ClientIP" - ## If "ClientIP", consecutive client requests will be directed to the same mongos Pod - ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#virtual-ips-and-service-proxies - ## - sessionAffinity: None - ## @param metrics.service.sessionAffinityConfig Additional settings for the sessionAffinity - ## sessionAffinityConfig: - ## clientIP: - ## timeoutSeconds: 300 - ## - sessionAffinityConfig: {} - ## Prometheus Operator ServiceMonitor configuration - ## - serviceMonitor: - ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) - ## - enabled: false - ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running - ## e.g: - ## namespace: monitoring - ## - namespace: "" - ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. 
- ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## e.g: - ## interval: 10s - ## - interval: "" - ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint - ## e.g: - ## scrapeTimeout: 10s - ## - scrapeTimeout: "" - ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - relabelings: [] - ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion - ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig - ## - metricRelabelings: [] - ## @param metrics.serviceMonitor.selector Prometheus instance selector labels - ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration - ## e.g: - ## selector: - ## prometheus: my-prometheus - ## - selector: {} - ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels - ## - honorLabels: false ## Network Policies ## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ ## @@ -832,231 +722,165 @@ networkPolicy: ## ingressNSMatchLabels: {} ingressNSPodMatchLabels: {} -## Persistence parameters + +## @section Metrics parameters + +## Prometheus metrics +## +metrics: + ## @param metrics.enabled Enable the export of Prometheus metrics + ## + enabled: false + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + ## @param metrics.serviceMonitor.enabled if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) + ## + enabled: false + ## @param metrics.serviceMonitor.namespace Namespace in which Prometheus is running + ## e.g: 
+ ## namespace: monitoring + ## + namespace: "" + ## @param metrics.serviceMonitor.labels Additional labels that can be used so ServiceMonitor will be discovered by Prometheus + ## + labels: {} + ## @param metrics.serviceMonitor.interval Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## interval: 10s + ## + interval: "" + ## @param metrics.serviceMonitor.scrapeTimeout Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## e.g: + ## scrapeTimeout: 10s + ## + scrapeTimeout: "" + ## @param metrics.serviceMonitor.relabelings RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + relabelings: [] + ## @param metrics.serviceMonitor.metricRelabelings MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## + metricRelabelings: [] + ## @param metrics.serviceMonitor.selector Prometheus instance selector labels + ## ref: https://github.com/bitnami/charts/tree/main/bitnami/prometheus-operator#prometheus-configuration + ## e.g: + ## selector: + ## prometheus: my-prometheus + ## + selector: {} + ## @param metrics.serviceMonitor.honorLabels honorLabels chooses the metric's labels on collisions with target labels + ## + honorLabels: false + +## @section Persistence parameters + +## Enable persistence using Persistent Volume Claims +## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/ ## persistence: - ## @param persistence.enabled Enable data persistence + ## @param persistence.enabled Enable InfluxDB(TM) Core data persistence (ignored unless `objectStore` is set to `file`) ## enabled: true - ## @param persistence.existingClaim Use a existing PVC which must 
be created manually before bound + ## @param persistence.existingClaim A manually managed Persistent Volume and Claim ## If defined, PVC must be created manually before volume will be bound ## The value is evaluated as a template ## existingClaim: "" - ## @param persistence.storageClass Specify the `storageClass` used to provision the volume + ## @param persistence.storageClass PVC Storage Class for InfluxDB(TM) Core data volume ## If defined, storageClassName: ## If set to "-", storageClassName: "", which disables dynamic provisioning ## If undefined (the default) or set to null, no storageClassName spec is ## set, choosing the default provisioner. ## storageClass: "" - ## @param persistence.accessModes Access mode of data volume + ## @param persistence.accessModes Persistent Volume Access Modes ## accessModes: - ReadWriteOnce - ## @param persistence.size Size of data volume + ## @param persistence.size PVC Storage Request for InfluxDB(TM) Core data volume ## size: 8Gi - ## @param persistence.annotations Persistent Volume Claim annotations + ## @param persistence.dataSource Custom PVC data source + ## + dataSource: {} + ## @param persistence.annotations Additional custom annotations for the PVC ## annotations: {} -## Pod Service Account -## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ -## @param serviceAccount.create Specifies whether a ServiceAccount should be created -## @param serviceAccount.name Name of the service account to use. If not set and create is true, a name is generated using the fullname template. -## @param serviceAccount.automountServiceAccountToken Automount service account token for the server service account -## @param serviceAccount.annotations Annotations for service account. Evaluated as a template. Only used if `create` is `true`. 
-## -serviceAccount: - ## DEPRECATED: serviceAccount.enabled - Use serviceAccount.create instead + ## @param persistence.selector Selector to match an existing Persistent Volume for InfluxDB(TM) Core data PVC. If set, the PVC can't have a PV dynamically provisioned for it + ## selector: + ## matchLabels: + ## app: my-app ## - #enabled: false - create: true - name: "" - automountServiceAccountToken: false - annotations: {} -## Pod Security Policy -## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ -## @param psp.create Whether to create a PodSecurityPolicy. WARNING: PodSecurityPolicy is deprecated in Kubernetes v1.21 or later, unavailable in v1.25 or later -## -psp: - create: false -## Role Based Access -## Ref: https://kubernetes.io/docs/admin/authorization/rbac/ -## @param rbac.create Create Role and RoleBinding (required for PSP to work) -## -rbac: - create: false -## @section Volume permissions parameters + selector: {} + ## @param persistence.mountPath Mount path of the InfluxDB(TM) Core data volume + ## + mountPath: /bitnami/influxdb -## Init Container parameters -## Change the owner and group of the persistent volume mountpoint to 'runAsUser:fsGroup' -## values from the securityContext section. -## -volumePermissions: - ## @param volumePermissions.enabled Enable init container that changes the owner and group of the persistent volume mountpoint to `runAsUser:fsGroup` - ## - enabled: false - ## @param volumePermissions.image.registry [default: REGISTRY_NAME] Init container volume-permissions image registry - ## @param volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] Init container volume-permissions image name - ## @skip volumePermissions.image.tag Init container volume-permissions image tag - ## @param volumePermissions.image.digest Init container volume-permissions image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param volumePermissions.image.pullPolicy Init container volume-permissions image pull policy - ## @param volumePermissions.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/os-shell - tag: 12-debian-12-r45 - digest: "" - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## Init container Security Context - ## Note: the chown of the data folder is done to securityContext.runAsUser - ## and not the below volumePermissions.securityContext.runAsUser - ## When runAsUser is set to special value "auto", init container will try to chwon the - ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` - ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
- ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with - ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false - ## @param volumePermissions.securityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param volumePermissions.securityContext.runAsUser User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") - ## - securityContext: - seLinuxOptions: {} - runAsUser: 0 -## @section InfluxDB(TM) backup parameters -backup: - ## @param backup.enabled Enable InfluxDB(TM) backup - ## - enabled: false - ## @param backup.directory Directory where backups are stored - ## - directory: "/backups" - ## @param backup.retentionDays Retention time in days for backups (older backups are deleted) - ## - retentionDays: 10 +## @section Default init-containers - ## Persistence parameters +defaultInitContainers: + ## 'volume-permissions' init-container + ## Used to change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each node ## - persistence: - ## @param backup.persistence.ownConfig Prefer independent own persistence parameters to configure the backup volume - ## When set to `false` (for backwards compatibility), the rest of the persistence parameters below will be ignored. - ## This parameter will be set to `true` and removed in a future release. 
+ volumePermissions: + ## @param defaultInitContainers.volumePermissions.enabled Enable init-container that changes the owner and group of the persistent volume ## - ownConfig: false - ## @param backup.persistence.enabled Enable data persistence for backup volume + enabled: false + ## @param defaultInitContainers.volumePermissions.image.registry [default: REGISTRY_NAME] "volume-permissions" init-containers' image registry + ## @param defaultInitContainers.volumePermissions.image.repository [default: REPOSITORY_NAME/os-shell] "volume-permissions" init-containers' image repository + ## @skip defaultInitContainers.volumePermissions.image.tag "volume-permissions" init-containers' image tag (immutable tags are recommended) + ## @param defaultInitContainers.volumePermissions.image.digest "volume-permissions" init-containers' image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param defaultInitContainers.volumePermissions.image.pullPolicy "volume-permissions" init-containers' image pull policy + ## @param defaultInitContainers.volumePermissions.image.pullSecrets "volume-permissions" init-containers' image pull secrets ## - enabled: true - ## @param backup.persistence.existingClaim Use a existing PVC which must be created manually before bound - ## If defined, PVC must be created manually before volume will be bound - ## The value is evaluated as a template - ## - existingClaim: "" - ## @param backup.persistence.storageClass Specify the `storageClass` used to provision the volume - ## If defined, storageClassName: - ## If set to "-", storageClassName: "", which disables dynamic provisioning - ## If undefined (the default) or set to null, no storageClassName spec is - ## set, choosing the default provisioner. 
- ## - storageClass: "" - ## @param backup.persistence.accessModes Access mode of data volume - ## - accessModes: - - ReadWriteOnce - ## @param backup.persistence.size Size of data volume - ## - size: 8Gi - ## @param backup.persistence.annotations Persistent Volume Claim annotations - ## - annotations: {} - ## Cronjob configuration - ## This cronjob is used to create InfluxDB(TM) backups - ## - cronjob: - ## @param backup.cronjob.schedule Schedule in Cron format to save snapshots - ## See https://en.wikipedia.org/wiki/Cron - ## - schedule: "0 2 * * *" - ## @param backup.cronjob.historyLimit Number of successful finished jobs to retain - ## - historyLimit: 1 - ## - ## Option to include ca-bundle - ## - caBundle: - ## @param backup.cronjob.caBundle.enabled Boolean flag to enable/disable the inclusion of a CA bundle for backup CronJob. - ## If enabled a configmap should be created with key as ca-bundle.crt and value to be the actual CA Bundle + image: + registry: docker.io + repository: bitnami/os-shell + tag: 12-debian-12-r45 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName ## - enabled: false - ## @param backup.cronjob.caBundle.existingConfigMap Name of the existing ConfigMap that contains the CA bundle for SSL Communication. - ## - existingConfigMap: "" - ## @param backup.cronjob.caBundle.mountPath The path inside the CronJob container where the CA bundle will be mounted. - ## - mountPath: "/opt/ca-certificates/ca-bundle.crt" - ## @param backup.cronjob.caBundle.subPath The filename within the mountPath directory where the CA bundle will be available. 
- ## - subPath: "ca-bundle.crt" + pullSecrets: [] + ## Configure "volume-permissions" init-container Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.enabled Enable "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.runAsUser Set runAsUser in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.privileged Set privileged in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.allowPrivilegeEscalation Set allowPrivilegeEscalation in "volume-permissions" init-containers' Security Context + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.add List of capabilities to be added in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.capabilities.drop List of capabilities to be dropped in "volume-permissions" init-containers + ## @param defaultInitContainers.volumePermissions.containerSecurityContext.seccompProfile.type Set seccomp profile in "volume-permissions" init-containers ## - ## @param backup.cronjob.podAnnotations Pod annotations - ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ - ## - podAnnotations: {} - ## K8s Security Context for Backup Cronjob pods - ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param backup.cronjob.podSecurityContext.enabled Enable security context for InfluxDB(TM) backup pods - ## @param 
backup.cronjob.podSecurityContext.fsGroupChangePolicy Set filesystem group change policy - ## @param backup.cronjob.podSecurityContext.sysctls Set kernel settings using the sysctl interface - ## @param backup.cronjob.podSecurityContext.supplementalGroups Set filesystem extra groups - ## @param backup.cronjob.podSecurityContext.fsGroup Group ID for the InfluxDB(TM) filesystem - ## - podSecurityContext: - enabled: true - fsGroupChangePolicy: Always - sysctls: [] - supplementalGroups: [] - fsGroup: 1001 - ## K8s Security Context for Backup Cronjob containers - ## https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ - ## @param backup.cronjob.containerSecurityContext.enabled Enabled containers' Security Context - ## @param backup.cronjob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container - ## @param backup.cronjob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser - ## @param backup.cronjob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup - ## @param backup.cronjob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot - ## @param backup.cronjob.containerSecurityContext.privileged Set container's Security Context privileged - ## @param backup.cronjob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem - ## @param backup.cronjob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation - ## @param backup.cronjob.containerSecurityContext.capabilities.drop List of capabilities to be dropped - ## @param backup.cronjob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile containerSecurityContext: enabled: true seLinuxOptions: {} - runAsUser: 1001 - runAsGroup: 1001 - runAsNonRoot: true + runAsUser: 0 privileged: false - readOnlyRootFilesystem: true allowPrivilegeEscalation: false 
capabilities: + add: [] drop: ["ALL"] seccompProfile: type: "RuntimeDefault" - ## @param backup.cronjob.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). + ## InfluxDB(TM) Core "volume-permissions" init-container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param defaultInitContainers.volumePermissions.resourcesPreset Set InfluxDB(TM) Core "volume-permissions" init-container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if defaultInitContainers.volumePermissions.resources is set (defaultInitContainers.volumePermissions.resources is recommended for production). ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 ## - resourcesPreset: "none" - ## @param backup.cronjob.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: + resourcesPreset: "nano" + ## @param defaultInitContainers.volumePermissions.resources Set InfluxDB(TM) Core "volume-permissions" init-container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: ## resources: ## requests: ## cpu: 2 @@ -1066,208 +890,188 @@ backup: ## memory: 1024Mi ## resources: {} - ## @param backup.podAffinityPreset Backup ™ Pod affinity preset. Ignored if `affinity` is set.
Allowed values: `soft` or `hard` - ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + +## @section Other Parameters + +## Service Account for InfluxDB(TM) Core +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## @param serviceAccount.create Enable creation of ServiceAccount for InfluxDB(TM) Core pods +## @param serviceAccount.name Name of the service account to use. If not set and `create` is `true`, a name is generated +## @param serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created +## @param serviceAccount.annotations Additional custom annotations for the ServiceAccount +## +serviceAccount: + create: true + name: "" + automountServiceAccountToken: false + annotations: {} +## Role Based Access Control +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## @param rbac.create Whether to create & use RBAC resources or not +## +rbac: + create: true + +## @section InfluxDB(TM) Core "create-admin-token" K8s Job parameters +## +createAdminTokenJob: + ## @param createAdminTokenJob.enabled Whether to create a random admin token using a K8s job (ignored if `objectStore` is set to `memory` or `auth.enabled` is set to `false`). Warning: do not use this feature if Helm hooks aren't supported in your environment ## - podAffinityPreset: "" - ## @param backup.podAntiAffinityPreset Backup™ Pod anti-affinity preset. Ignored if `affinity` is set. 
Allowed values: `soft` or `hard` - ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + enabled: true + ## Bitnami Kubectl image + ## @param createAdminTokenJob.image.registry [default: REGISTRY_NAME] Kubectl image registry + ## @param createAdminTokenJob.image.repository [default: REPOSITORY_NAME/kubectl] Kubectl image repository + ## @skip createAdminTokenJob.image.tag Kubectl image tag (immutable tags are recommended) + ## @param createAdminTokenJob.image.digest Kubectl image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag + ## @param createAdminTokenJob.image.pullPolicy Kubectl image pull policy + ## @param createAdminTokenJob.image.pullSecrets Kubectl image pull secrets ## - podAntiAffinityPreset: soft + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.33.1-debian-12-r2 + digest: "" + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## e.g: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## @param createAdminTokenJob.backoffLimit set backoff limit of the job + ## + backoffLimit: 10 + ## Configure InfluxDB(TM) Core "create-admin-token" job's containers (only main one) Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod + ## @param createAdminTokenJob.containerSecurityContext.enabled Enable InfluxDB(TM) Core "create-admin-token" job's containers' Security Context + ## @param createAdminTokenJob.containerSecurityContext.seLinuxOptions [object,nullable] Set SELinux options in container + ## @param createAdminTokenJob.containerSecurityContext.runAsUser Set containers' Security Context runAsUser + ## @param createAdminTokenJob.containerSecurityContext.runAsGroup Set containers' Security Context runAsGroup + ## @param createAdminTokenJob.containerSecurityContext.runAsNonRoot Set container's Security Context runAsNonRoot + ## @param createAdminTokenJob.containerSecurityContext.privileged Set container's Security Context privileged + ## @param createAdminTokenJob.containerSecurityContext.readOnlyRootFilesystem Set container's Security Context readOnlyRootFilesystem + ## @param createAdminTokenJob.containerSecurityContext.allowPrivilegeEscalation Set container's Security Context allowPrivilegeEscalation + ## @param createAdminTokenJob.containerSecurityContext.capabilities.drop List of capabilities to be dropped + ## @param createAdminTokenJob.containerSecurityContext.seccompProfile.type Set container's Security Context seccomp profile + ## + containerSecurityContext: + enabled: true + seLinuxOptions: {} + runAsUser: 1001 + runAsGroup: 1001 + runAsNonRoot: true + privileged: false + readOnlyRootFilesystem: true + allowPrivilegeEscalation: false + capabilities: + drop: ["ALL"] + seccompProfile: + 
type: "RuntimeDefault" + ## InfluxDB(TM) Core "create-admin-token" job's container resource requests and limits + ## ref: http://kubernetes.io/docs/concepts/configuration/manage-compute-resources-container/ + ## @param createAdminTokenJob.resourcesPreset Set InfluxDB(TM) Core "create-admin-token" job's container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if createAdminTokenJob.resources is set (createAdminTokenJob.resources is recommended for production). + ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 + ## + resourcesPreset: "nano" + ## @param createAdminTokenJob.resources Set InfluxDB(TM) Core "create-admin-token" job's container requests and limits for different resources like CPU or memory (essential for production workloads) + ## E.g: + ## resources: + ## requests: + ## cpu: 2 + ## memory: 512Mi + ## limits: + ## cpu: 3 + ## memory: 1024Mi + ## + resources: {} + ## @param createAdminTokenJob.automountServiceAccountToken Mount Service Account token in InfluxDB(TM) Core "create-admin-token" job's pods + ## + automountServiceAccountToken: true + ## @param createAdminTokenJob.hostAliases Add deployment host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## @param createAdminTokenJob.annotations [object] Add annotations to the InfluxDB(TM) Core "create-admin-token" job + ## + annotations: {} + ## @param createAdminTokenJob.podLabels Additional pod labels for InfluxDB(TM) Core "create-admin-token" job + ## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + ## @param createAdminTokenJob.podAnnotations Additional pod annotations for InfluxDB(TM) Core "create-admin-token" job + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ + ## + 
podAnnotations: {} + ## @param createAdminTokenJob.topologyKey Override common lib default topology key. If empty - "kubernetes.io/hostname" is used + ## i.e. topologyKey: topology.kubernetes.io/zone + ## + topologyKey: "" + ## @param createAdminTokenJob.affinity Affinity for InfluxDB(TM) Core create-admin-token pods assignment (evaluated as a template) + ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## Note: `createAdminTokenJob.podAffinityPreset`, `createAdminTokenJob.podAntiAffinityPreset`, and `createAdminTokenJob.nodeAffinityPreset` will be ignored when it's set + ## + affinity: {} ## Node affinity preset ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#node-affinity + ## @param createAdminTokenJob.nodeAffinityPreset.key Node label key to match. Ignored if `createAdminTokenJob.affinity` is set. + ## @param createAdminTokenJob.nodeAffinityPreset.type Node affinity preset type. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard` + ## @param createAdminTokenJob.nodeAffinityPreset.values Node label values to match. Ignored if `createAdminTokenJob.affinity` is set. ## nodeAffinityPreset: - ## @param backup.nodeAffinityPreset.type Backup™ Node affinity preset type. Ignored if `affinity` is set. Allowed values: `soft` or `hard` - type: "" - ## @param backup.nodeAffinityPreset.key Backup™ Node label key to match Ignored if `affinity` is set. - ## E.g. + ## e.g: ## key: "kubernetes.io/e2e-az-name" ## key: "" - ## @param backup.nodeAffinityPreset.values Backup™ Node label values to match. Ignored if `affinity` is set. - ## E.g. 
+ type: "" + ## e.g: ## values: ## - e2e-az1 ## - e2e-az2 ## values: [] - ## @param backup.affinity Backup™ Affinity for backup pod assignment - ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity - ## Note: podAffinityPreset, podAntiAffinityPreset, and nodeAffinityPreset will be ignored when it's set - ## - affinity: {} - ## @param backup.nodeSelector Backup™ Node labels for backup pod assignment + ## @param createAdminTokenJob.nodeSelector Node labels for InfluxDB(TM) Core create-admin-token pods assignment ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/ ## nodeSelector: {} - ## @param backup.tolerations Backup™ Tolerations for backup pod assignment + ## @param createAdminTokenJob.podAffinityPreset Pod affinity preset. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard`. + ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAffinityPreset: "" + ## @param createAdminTokenJob.podAntiAffinityPreset Pod anti-affinity preset. Ignored if `createAdminTokenJob.affinity` is set. Allowed values: `soft` or `hard`. + ## Ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#inter-pod-affinity-and-anti-affinity + ## + podAntiAffinityPreset: soft + ## @param createAdminTokenJob.tolerations Tolerations for InfluxDB(TM) Core create-admin-token pods assignment ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ ## tolerations: [] - ## Storage providers where to upload backups + ## @param createAdminTokenJob.topologySpreadConstraints Topology Spread Constraints for pod assignment spread across your cluster among failure-domains. 
Evaluated as a template + ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/#spread-constraints-for-pods ## - uploadProviders: - ## Google Storage Bucket configuration - ## @param backup.uploadProviders.google.enabled enable upload to google storage bucket - ## @param backup.uploadProviders.google.secret json secret with serviceaccount data to access Google storage bucket - ## @param backup.uploadProviders.google.secretKey service account secret key name - ## @param backup.uploadProviders.google.existingSecret Name of existing secret object with Google serviceaccount json credentials - ## @param backup.uploadProviders.google.bucketName google storage bucket name name - ## - google: - enabled: false - secret: "" - secretKey: "key.json" - existingSecret: "" - bucketName: "gs://bucket/influxdb" - ## Bitnami Google Cloud SDK image - ## ref: https://hub.docker.com/r/bitnami/google-cloud-sdk/tags/ - ## @param backup.uploadProviders.google.image.registry [default: REGISTRY_NAME] Google Cloud SDK image registry - ## @param backup.uploadProviders.google.image.repository [default: REPOSITORY_NAME/google-cloud-sdk] Google Cloud SDK image name - ## @skip backup.uploadProviders.google.image.tag Google Cloud SDK image tag - ## @param backup.uploadProviders.google.image.digest Google Cloud SDK image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param backup.uploadProviders.google.image.pullPolicy Google Cloud SDK image pull policy - ## @param backup.uploadProviders.google.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/google-cloud-sdk - tag: 0.523.0-debian-12-r0 - digest: "" - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. 
- ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param backup.uploadProviders.google.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "none" - ## @param backup.uploadProviders.google.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - ## Azure Storage Container configuration - ## - azure: - ## @param backup.uploadProviders.azure.enabled Enable upload to azure storage container - ## @param backup.uploadProviders.azure.secret Secret with credentials to access Azure storage - ## @param backup.uploadProviders.azure.secretKey Service account secret key name - ## @param backup.uploadProviders.azure.existingSecret Name of existing secret object - ## @param backup.uploadProviders.azure.containerName Destination container - enabled: false - secret: "" - secretKey: "connection-string" - existingSecret: "" - containerName: "influxdb-container" - ## Bitnami Azure CLI image - ## ref: https://hub.docker.com/r/bitnami/azure-cli/tags/ - ## @param backup.uploadProviders.azure.image.registry [default: REGISTRY_NAME] Azure CLI image registry - ## @param backup.uploadProviders.azure.image.repository [default: REPOSITORY_NAME/azure-cli] Azure CLI image repository - ## @skip backup.uploadProviders.azure.image.tag Azure CLI image tag (immutable tags are recommended) - ## @param backup.uploadProviders.azure.image.digest 
Azure CLI image digest in the way sha256:aa.... Please note this parameter, if set, will override the tag - ## @param backup.uploadProviders.azure.image.pullPolicy Azure CLI image pull policy - ## @param backup.uploadProviders.azure.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/azure-cli - tag: 2.73.0-debian-12-r0 - digest: "" - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param backup.uploadProviders.azure.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). 
- ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "none" - ## @param backup.uploadProviders.azure.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} - aws: - ## @param backup.uploadProviders.aws.enabled Enable upload to aws s3 bucket - ## @param backup.uploadProviders.aws.accessKeyID Access Key ID to access aws s3 - ## @param backup.uploadProviders.aws.secretAccessKey Secret Access Key to access aws s3 - ## @param backup.uploadProviders.aws.region Region of aws s3 bucket - ## @param backup.uploadProviders.aws.existingSecret Name of existing secret object - ## @param backup.uploadProviders.aws.bucketName aws s3 bucket name - ## @param backup.uploadProviders.aws.endpoint aws s3 endpoint, no value default public endpoint aws s3 endpoint - ## @param backup.uploadProviders.aws.usePasswordFiles Mount aws s3 credentials as files instead of using environment variables - enabled: false - accessKeyID: "" - secretAccessKey: "" - region: "us-east-1" - existingSecret: "" - bucketName: "s3://bucket/influxdb" - endpoint: "" - usePasswordFiles: true - ## Bitnami AWS CLI image - ## ref: https://hub.docker.com/r/bitnami/aws-cli/tags - ## @param backup.uploadProviders.aws.image.registry [default: REGISTRY_NAME] AWS CLI image registry - ## @param backup.uploadProviders.aws.image.repository [default: REPOSITORY_NAME/aws-cli] AWS CLI image repository - ## @skip backup.uploadProviders.aws.image.tag AWS CLI image tag (immutable tags are recommended) - ## @param backup.uploadProviders.aws.image.digest AWS CLI image digest in the way sha256:aa.... 
Please note this parameter, if set, will override the tag - ## @param backup.uploadProviders.aws.image.pullPolicy AWS CLI image pull policy - ## @param backup.uploadProviders.aws.image.pullSecrets Specify docker-registry secret names as an array - ## - image: - registry: docker.io - repository: bitnami/aws-cli - tag: 2.27.20-debian-12-r1 - digest: "" - ## ref: https://kubernetes.io/docs/concepts/containers/images/#pre-pulled-images - ## - pullPolicy: IfNotPresent - ## Optionally specify an array of imagePullSecrets. - ## Secrets must be manually created in the namespace. - ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ - ## e.g: - ## pullSecrets: - ## - myRegistryKeySecretName - ## - pullSecrets: [] - ## @param backup.uploadProviders.aws.resourcesPreset Set container resources according to one common preset (allowed values: none, nano, micro, small, medium, large, xlarge, 2xlarge). This is ignored if influxdb.resources is set (influxdb.resources is recommended for production). - ## More information: https://github.com/bitnami/charts/blob/main/bitnami/common/templates/_resources.tpl#L15 - ## - resourcesPreset: "none" - ## @param backup.uploadProviders.aws.resources Set container requests and limits for different resources like CPU or memory (essential for production workloads) - ## Example: - ## resources: - ## requests: - ## cpu: 2 - ## memory: 512Mi - ## limits: - ## cpu: 3 - ## memory: 1024Mi - ## - resources: {} + topologySpreadConstraints: [] + ## @param createAdminTokenJob.priorityClassName Priority Class Name + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/#priorityclass + ## + priorityClassName: "" + ## @param createAdminTokenJob.schedulerName Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + schedulerName: "" + ## @param createAdminTokenJob.terminationGracePeriodSeconds Seconds InfluxDB(TM) Core create-admin-token pod needs to terminate gracefully + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods + ## + terminationGracePeriodSeconds: "" + ## Service Account for InfluxDB(TM) Core create-admin-token pods + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ + ## @param createAdminTokenJob.serviceAccount.create Enable creation of ServiceAccount for InfluxDB(TM) Core create-admin-token pods + ## @param createAdminTokenJob.serviceAccount.name Name of the service account to use. If not set and `create` is `true`, a name is generated + ## @param createAdminTokenJob.serviceAccount.automountServiceAccountToken Allows auto mount of ServiceAccountToken on the serviceAccount created + ## @param createAdminTokenJob.serviceAccount.annotations Additional custom annotations for the ServiceAccount + ## + serviceAccount: + create: true + name: "" + automountServiceAccountToken: true + annotations: {}