diff --git a/bitnami/kafka/3.6/debian-11/Dockerfile b/bitnami/kafka/3.6/debian-11/Dockerfile new file mode 100644 index 000000000000..c2878da2bedb --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/Dockerfile @@ -0,0 +1,62 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +FROM docker.io/bitnami/minideb:bullseye + +ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security" +ARG TARGETARCH + +LABEL com.vmware.cp.artifact.flavor="sha256:1e1b4657a77f0d47e9220f0c37b9bf7802581b93214fff7d1bd2364c8bf22e8e" \ + org.opencontainers.image.base.name="docker.io/bitnami/minideb:bullseye" \ + org.opencontainers.image.created="2023-10-16T16:30:05Z" \ + org.opencontainers.image.description="Application packaged by VMware, Inc" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.ref.name="3.6.0-debian-11-r0" \ + org.opencontainers.image.title="kafka" \ + org.opencontainers.image.vendor="VMware, Inc." \ + org.opencontainers.image.version="3.6.0" + +ENV HOME="/" \ + OS_ARCH="${TARGETARCH:-amd64}" \ + OS_FLAVOUR="debian-11" \ + OS_NAME="linux" + +COPY prebuildfs / +SHELL ["/bin/bash", "-o", "pipefail", "-c"] +# Install required system packages and dependencies +RUN install_packages ca-certificates curl procps zlib1g +RUN mkdir -p /tmp/bitnami/pkg/cache/ && cd /tmp/bitnami/pkg/cache/ && \ + COMPONENTS=( \ + "wait-for-port-1.0.7-2-linux-${OS_ARCH}-debian-11" \ + "java-17.0.8-7-5-linux-${OS_ARCH}-debian-11" \ + "render-template-1.0.6-2-linux-${OS_ARCH}-debian-11" \ + "kafka-3.6.0-0-linux-${OS_ARCH}-debian-11" \ + ) && \ + for COMPONENT in "${COMPONENTS[@]}"; do \ + if [ ! -f "${COMPONENT}.tar.gz" ]; then \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz" -O ; \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz.sha256" -O ; \ + fi && \ + sha256sum -c "${COMPONENT}.tar.gz.sha256" && \ + tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner --wildcards '*/files' && \ + rm -rf "${COMPONENT}".tar.gz{,.sha256} ; \ + done +RUN apt-get update && apt-get upgrade -y && \ + apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives +RUN chmod g+rwX /opt/bitnami +RUN ln -s /opt/bitnami/scripts/kafka/entrypoint.sh /entrypoint.sh +RUN ln -s /opt/bitnami/scripts/kafka/run.sh /run.sh + +COPY rootfs / +RUN /opt/bitnami/scripts/java/postunpack.sh +RUN /opt/bitnami/scripts/kafka/postunpack.sh +ENV APP_VERSION="3.6.0" \ + BITNAMI_APP_NAME="kafka" \ + JAVA_HOME="/opt/bitnami/java" \ + PATH="/opt/bitnami/common/bin:/opt/bitnami/java/bin:/opt/bitnami/kafka/bin:$PATH" + +EXPOSE 9092 + +USER 1001 +ENTRYPOINT [ "/opt/bitnami/scripts/kafka/entrypoint.sh" ] +CMD [ "/opt/bitnami/scripts/kafka/run.sh" ] diff --git a/bitnami/kafka/3.6/debian-11/docker-compose.yml b/bitnami/kafka/3.6/debian-11/docker-compose.yml new file mode 100644 index 000000000000..88466f32b842 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/docker-compose.yml @@ -0,0 +1,26 @@ +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 + +version: "2" + +services: + kafka: + image: docker.io/bitnami/kafka:3.6 + ports: + - "9092:9092" + volumes: + - "kafka_data:/bitnami" + environment: + # KRaft settings + - KAFKA_CFG_NODE_ID=0 + - KAFKA_CFG_PROCESS_ROLES=controller,broker + - KAFKA_CFG_CONTROLLER_QUORUM_VOTERS=0@kafka:9093 + # Listeners + - KAFKA_CFG_LISTENERS=PLAINTEXT://:9092,CONTROLLER://:9093 + - KAFKA_CFG_ADVERTISED_LISTENERS=PLAINTEXT://:9092 + - KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP=CONTROLLER:PLAINTEXT,PLAINTEXT:PLAINTEXT + - KAFKA_CFG_CONTROLLER_LISTENER_NAMES=CONTROLLER + - KAFKA_CFG_INTER_BROKER_LISTENER_NAME=PLAINTEXT +volumes: + kafka_data: + driver: local diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json new file mode 100644 index 000000000000..1129f091b709 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/.bitnami_components.json @@ -0,0 +1,26 @@ +{ + "java": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "17.0.8-7-5" + }, + "kafka": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "3.6.0-0" + }, + "render-template": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "1.0.6-2" + }, + "wait-for-port": { + "arch": "amd64", + "distro": "debian-11", + "type": "NAMI", + "version": "1.0.7-2" + } +} \ No newline at end of file diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt new file mode 100644 index 000000000000..76956b38e82c --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/licenses/licenses.txt @@ -0,0 +1,2 @@ +Bitnami containers ship with software bundles. You can find the licenses under: +/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh new file mode 100644 index 000000000000..184de8a117e2 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libbitnami.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami custom library + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Constants +BOLD='\033[1m' + +# Functions + +######################## +# Print the welcome page +# Globals: +# DISABLE_WELCOME_MESSAGE +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_welcome_page() { + if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then + if [[ -n "$BITNAMI_APP_NAME" ]]; then + print_image_welcome_page + fi + fi +} + +######################## +# Print the welcome page for a Bitnami Docker image +# Globals: +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_image_welcome_page() { + local github_url="https://github.com/bitnami/containers" + + log "" + log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}" + log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}" + log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}" + log "" +} + diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh new file mode 100644 index 000000000000..63759c777f3b --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfile.sh @@ -0,0 +1,141 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing files + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libos.sh + +# Functions + +######################## +# Replace a regex-matching string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# $4 - use POSIX regex. Default: true +# Returns: +# None +######################### +replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Replace a regex-matching multiline string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# Returns: +# None +######################### +replace_in_file_multiline() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + + local result + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")" + echo "$result" > "$filename" +} + +######################## +# Remove a line in a file based on a regex +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - use POSIX regex. 
Default: true +# Returns: +# None +######################### +remove_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local posix_regex=${3:-true} + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + if [[ $posix_regex = true ]]; then + result="$(sed -E "/$match_regex/d" "$filename")" + else + result="$(sed "/$match_regex/d" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Appends text after the last line matching a pattern +# Arguments: +# $1 - file +# $2 - match regex +# $3 - contents to add +# Returns: +# None +######################### +append_file_after_last_match() { + local file="${1:?missing file}" + local match_regex="${2:?missing pattern}" + local value="${3:?missing value}" + + # We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again + result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)" + echo "$result" > "$file" +} + +######################## +# Wait until certain entry is present in a log file +# Arguments: +# $1 - entry to look for +# $2 - log file +# $3 - max retries. Default: 12 +# $4 - sleep between retries (in seconds). Default: 5 +# Returns: +# Boolean +######################### +wait_for_log_entry() { + local -r entry="${1:-missing entry}" + local -r log_file="${2:-missing log file}" + local -r retries="${3:-12}" + local -r interval_time="${4:-5}" + local attempt=0 + + check_log_file_for_entry() { + if ! grep -qE "$entry" "$log_file"; then + debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})" + return 1 + fi + } + debug "Checking that ${log_file} log file contains entry \"${entry}\"" + if retry_while check_log_file_for_entry "$retries" "$interval_time"; then + debug "Found entry \"${entry}\" in ${log_file}" + true + else + error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries" + debug_execute cat "$log_file" + return 1 + fi +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh new file mode 100644 index 000000000000..96b22f99710c --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libfs.sh @@ -0,0 +1,193 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for file system actions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh
+
+# Functions
+
+########################
+# Ensure a file/directory is owned (user and group) by the given user
+# Arguments:
+#   $1 - filepath
+#   $2 - owner
+#   $3 - group (optional, defaults to the owner)
+# Returns:
+#   None
+#########################
+owned_by() {
+    local path="${1:?path is missing}"
+    local owner="${2:?owner is missing}"
+    local group="${3:-}"
+
+    if [[ -n $group ]]; then
+        chown "$owner":"$group" "$path"
+    else
+        chown "$owner":"$owner" "$path"
+    fi
+}
+
+########################
+# Ensure a directory exists and, optionally, is owned by the given user
+# Arguments:
+#   $1 - directory
+#   $2 - owner user (optional)
+#   $3 - owner group (optional)
+# Returns:
+#   None
+#########################
+ensure_dir_exists() {
+    local dir="${1:?directory is missing}"
+    local owner_user="${2:-}"
+    local owner_group="${3:-}"
+
+    [ -d "${dir}" ] || mkdir -p "${dir}"
+    if [[ -n $owner_user ]]; then
+        owned_by "$dir" "$owner_user" "$owner_group"
+    fi
+}
+
+########################
+# Checks whether a directory is empty or not
+# arguments:
+#   $1 - directory
+# returns:
+#   boolean
+#########################
+is_dir_empty() {
+    local -r path="${1:?missing directory}"
+    # Calculate real path in order to avoid issues with symlinks
+    local -r dir="$(realpath "$path")"
+    if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Checks whether a mounted directory is empty or not
+# arguments:
+#   $1 - directory
+# returns:
+#   boolean
+#########################
+is_mounted_dir_empty() {
+    local dir="${1:?missing directory}"
+
+    if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Checks whether a file can be written to or not
+# arguments:
+#   $1 - file
+# returns:
+#   boolean
+#########################
+is_file_writable() {
+    local file="${1:?missing file}"
+    local dir
+    dir="$(dirname "$file")"
+
+    if [[ (-f "$file" && -w "$file") || (! -f "$file" && -d "$dir" && -w "$dir") ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Relativize a path
+# arguments:
+#   $1 - path
+#   $2 - base
+# returns:
+#   None
+#########################
+relativize() {
+    local -r path="${1:?missing path}"
+    local -r base="${2:?missing base}"
+    pushd "$base" >/dev/null || exit
+    realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
+    popd >/dev/null || exit
+}
+
+########################
+# Configure permissions and ownership recursively
+# Globals:
+#   None
+# Arguments:
+#   $1 - paths (as a string).
+# Flags:
+#   -f|--file-mode - mode for files.
+#   -d|--dir-mode - mode for directories.
+#   -u|--user - user
+#   -g|--group - group
+# Returns:
+#   None
+#########################
+configure_permissions_ownership() {
+    local -r paths="${1:?paths is missing}"
+    local dir_mode=""
+    local file_mode=""
+    local user=""
+    local group=""
+
+    # Validate arguments
+    shift 1
+    while [ "$#" -gt 0 ]; do
+        case "$1" in
+        -f | --file-mode)
+            shift
+            file_mode="${1:?missing mode for files}"
+            ;;
+        -d | --dir-mode)
+            shift
+            dir_mode="${1:?missing mode for directories}"
+            ;;
+        -u | --user)
+            shift
+            user="${1:?missing user}"
+            ;;
+        -g | --group)
+            shift
+            group="${1:?missing group}"
+            ;;
+        *)
+            echo "Invalid command line flag $1" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    read -r -a filepaths <<<"$paths"
+    for p in "${filepaths[@]}"; do
+        if [[ -e "$p" ]]; then
+            find -L "$p" -printf ""
+            if [[ -n $dir_mode ]]; then
+                find -L "$p" -type d ! 
-perm "$dir_mode" -print0 | xargs -r -0 chmod "$dir_mode" + fi + if [[ -n $file_mode ]]; then + find -L "$p" -type f ! -perm "$file_mode" -print0 | xargs -r -0 chmod "$file_mode" + fi + if [[ -n $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}:${group}" + elif [[ -n $user ]] && [[ -z $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}" + elif [[ -z $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chgrp "${group}" + fi + else + stderr_print "$p does not exist" + fi + done +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh new file mode 100644 index 000000000000..dadd06149e00 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libhook.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library to use for scripts expected to be used as Kubernetes lifecycle hooks + +# shellcheck disable=SC1091 + +# Load generic libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libos.sh + +# Override functions that log to stdout/stderr of the current process, so they print to process 1 +for function_to_override in stderr_print debug_execute; do + # Output is sent to output of process 1 and thus end up in the container log + # The hook output in general isn't saved + eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2" +done diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh new file mode 100644 index 000000000000..2a9e76a4d725 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/liblog.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for logging functions + +# Constants +RESET='\033[0m' +RED='\033[38;5;1m' +GREEN='\033[38;5;2m' +YELLOW='\033[38;5;3m' +MAGENTA='\033[38;5;5m' +CYAN='\033[38;5;6m' + +# Functions + +######################## +# Print to STDERR +# Arguments: +# Message to print +# Returns: +# None +######################### +stderr_print() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_QUIET:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if ! 
[[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + printf "%b\\n" "${*}" >&2 + fi +} + +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +log() { + stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}" +} +######################## +# Log an 'info' message +# Arguments: +# Message to log +# Returns: +# None +######################### +info() { + log "${GREEN}INFO ${RESET} ==> ${*}" +} +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +warn() { + log "${YELLOW}WARN ${RESET} ==> ${*}" +} +######################## +# Log an 'error' message +# Arguments: +# Message to log +# Returns: +# None +######################### +error() { + log "${RED}ERROR${RESET} ==> ${*}" +} +######################## +# Log a 'debug' message +# Globals: +# BITNAMI_DEBUG +# Arguments: +# None +# Returns: +# None +######################### +debug() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_DEBUG:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + log "${MAGENTA}DEBUG${RESET} ==> ${*}" + fi +} + +######################## +# Indent a string +# Arguments: +# $1 - string +# $2 - number of indentation characters (default: 4) +# $3 - indentation character (default: " ") +# Returns: +# None +######################### +indent() { + local string="${1:-}" + local num="${2:?missing num}" + local char="${3:-" "}" + # Build the indentation unit string + local indent_unit="" + for ((i = 0; i < num; i++)); do + indent_unit="${indent_unit}${char}" + done + # shellcheck disable=SC2001 + # Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions + echo "$string" | sed "s/^/${indent_unit}/" +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh new file mode 100644 index 000000000000..b47c69a56825 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libnet.sh @@ -0,0 +1,165 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for network functions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Resolve IP address for a host/domain (i.e. 
DNS lookup) +# Arguments: +# $1 - Hostname to resolve +# $2 - IP address version (v4, v6), leave empty for resolving to any version +# Returns: +# IP +######################### +dns_lookup() { + local host="${1:?host is missing}" + local ip_version="${2:-}" + getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1 +} + +######################### +# Wait for a hostname and return the IP +# Arguments: +# $1 - hostname +# $2 - number of retries +# $3 - seconds to wait between retries +# Returns: +# - IP address that corresponds to the hostname +######################### +wait_for_dns_lookup() { + local hostname="${1:?hostname is missing}" + local retries="${2:-5}" + local seconds="${3:-1}" + check_host() { + if [[ $(dns_lookup "$hostname") == "" ]]; then + false + else + true + fi + } + # Wait for the host to be ready + retry_while "check_host ${hostname}" "$retries" "$seconds" + dns_lookup "$hostname" +} + +######################## +# Get machine's IP +# Arguments: +# None +# Returns: +# Machine IP +######################### +get_machine_ip() { + local -a ip_addresses + local hostname + hostname="$(hostname)" + read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)" + if [[ "${#ip_addresses[@]}" -gt 1 ]]; then + warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}" + elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then + error "Could not find any IP address associated to hostname ${hostname}" + exit 1 + fi + echo "${ip_addresses[0]}" +} + +######################## +# Check if the provided argument is a resolved hostname +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_hostname_resolved() { + local -r host="${1:?missing value}" + if [[ -n "$(dns_lookup "$host")" ]]; then + true + else + false + fi +} + +######################## +# Parse URL +# Globals: +# None +# Arguments: +# $1 - uri - String +# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String +# Returns: +# String +parse_uri() { + local uri="${1:?uri is missing}" + local component="${2:?component is missing}" + + # Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with + # additional sub-expressions to split authority into userinfo, host and port + # Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969) + local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?' + # || | ||| | | | | | | | | | + # |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment + # 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #... + # | 4 authority + # 3 //... 
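+    #
+    # Worked example (editor's illustration, not part of the upstream script):
+    # for the URI "http://alice@example.com:8080/docs?lang=en" the regex yields
+    #   parse_uri "$uri" "host"  -> example.com   (capture group 7)
+    #   parse_uri "$uri" "port"  -> 8080          (capture group 9)
+    #   parse_uri "$uri" "path"  -> /docs         (capture group 10)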
+    local index=0
+    case "$component" in
+        scheme)
+            index=2
+            ;;
+        authority)
+            index=4
+            ;;
+        userinfo)
+            index=6
+            ;;
+        host)
+            index=7
+            ;;
+        port)
+            index=9
+            ;;
+        path)
+            index=10
+            ;;
+        query)
+            index=13
+            ;;
+        fragment)
+            index=14
+            ;;
+        *)
+            stderr_print "unrecognized component $component"
+            return 1
+            ;;
+    esac
+    [[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
+}
+
+########################
+# Wait for an HTTP connection to succeed
+# Globals:
+#   *
+# Arguments:
+#   $1 - URL to wait for
+#   $2 - Maximum amount of retries (optional)
+#   $3 - Time between retries (optional)
+# Returns:
+#   true if the HTTP connection succeeded, false otherwise
+#########################
+wait_for_http_connection() {
+    local url="${1:?missing url}"
+    local retries="${2:-}"
+    local sleep_time="${3:-}"
+    if ! retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then
+        error "Could not connect to ${url}"
+        return 1
+    fi
+}
diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh
new file mode 100644
index 000000000000..c0500acee78d
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libos.sh
@@ -0,0 +1,657 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+#
+# Library for operating system actions
+
+# shellcheck disable=SC1091
+
+# Load Generic Libraries
+. /opt/bitnami/scripts/liblog.sh
+. /opt/bitnami/scripts/libfs.sh
+. /opt/bitnami/scripts/libvalidations.sh
+
+# Functions
+
+########################
+# Check if a user exists in the system
+# Arguments:
+#   $1 - user
+# Returns:
+#   Boolean
+#########################
+user_exists() {
+    local user="${1:?user is missing}"
+    id "$user" >/dev/null 2>&1
+}
+
+########################
+# Check if a group exists in the system
+# Arguments:
+#   $1 - group
+# Returns:
+#   Boolean
+#########################
+group_exists() {
+    local group="${1:?group is missing}"
+    getent group "$group" >/dev/null 2>&1
+}
+
+########################
+# Create a group in the system if it does not exist already
+# Arguments:
+#   $1 - group
+# Flags:
+#   -i|--gid - the ID for the new group
+#   -s|--system - whether to create the group as a system group (gid <= 999)
+# Returns:
+#   None
+#########################
+ensure_group_exists() {
+    local group="${1:?group is missing}"
+    local gid=""
+    local is_system_user=false
+
+    # Validate arguments
+    shift 1
+    while [ "$#" -gt 0 ]; do
+        case "$1" in
+        -i | --gid)
+            shift
+            gid="${1:?missing gid}"
+            ;;
+        -s | --system)
+            is_system_user=true
+            ;;
+        *)
+            echo "Invalid command line flag $1" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    if ! group_exists "$group"; then
+        local -a args=("$group")
+        if [[ -n "$gid" ]]; then
+            if group_exists "$gid"; then
+                error "The GID $gid is already in use." >&2
+                return 1
+            fi
+            args+=("--gid" "$gid")
+        fi
+        $is_system_user && args+=("--system")
+        groupadd "${args[@]}" >/dev/null 2>&1
+    fi
+}
+
+########################
+# Create a user in the system if it does not exist already
+# Arguments:
+#   $1 - user
+# Flags:
+#   -i|--uid - the ID for the new user
+#   -g|--group - the group the new user should belong to
+#   -a|--append-groups - comma-separated list of supplemental groups to append to the new user
+#   -h|--home - the home directory for the new user
+#   -s|--system - whether to create the new user as a system user (uid <= 999)
+# Returns:
+#   None
+#########################
+ensure_user_exists() {
+    local user="${1:?user is missing}"
+    local uid=""
+    local group=""
+    local append_groups=""
+    local home=""
+    local is_system_user=false
+
+    # Validate arguments
+    shift 1
+    while [ "$#" -gt 0 ]; do
+        case "$1" in
+        -i | --uid)
+            shift
+            uid="${1:?missing uid}"
+            ;;
+        -g | --group)
+            shift
+            group="${1:?missing group}"
+            ;;
+        -a | --append-groups)
+            shift
+            append_groups="${1:?missing append_groups}"
+            ;;
+        -h | --home)
+            shift
+            home="${1:?missing home directory}"
+            ;;
+        -s | --system)
+            is_system_user=true
+            ;;
+        *)
+            echo "Invalid command line flag $1" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    if ! user_exists "$user"; then
+        local -a user_args=("-N" "$user")
+        if [[ -n "$uid" ]]; then
+            if user_exists "$uid"; then
+                error "The UID $uid is already in use."
+                return 1
+            fi
+            user_args+=("--uid" "$uid")
+        else
+            $is_system_user && user_args+=("--system")
+        fi
+        useradd "${user_args[@]}" >/dev/null 2>&1
+    fi
+
+    if [[ -n "$group" ]]; then
+        local -a group_args=("$group")
+        $is_system_user && group_args+=("--system")
+        ensure_group_exists "${group_args[@]}"
+        usermod -g "$group" "$user" >/dev/null 2>&1
+    fi
+
+    if [[ -n "$append_groups" ]]; then
+        local -a groups
+        read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")"
+        for group in "${groups[@]}"; do
+            ensure_group_exists "$group"
+            usermod -aG "$group" "$user" >/dev/null 2>&1
+        done
+    fi
+
+    if [[ -n "$home" ]]; then
+        mkdir -p "$home"
+        usermod -d "$home" "$user" >/dev/null 2>&1
+        configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
+    fi
+}
+
+########################
+# Check if the script is currently running as root
+# Arguments:
+#   None
+# Returns:
+#   Boolean
+#########################
+am_i_root() {
+    if [[ "$(id -u)" = "0" ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Print OS metadata
+# Arguments:
+#   $1 - Flag name
+# Flags:
+#   --id - Distro ID
+#   --version - Distro version
+#   --branch - Distro branch
+#   --codename - Distro codename
+#   --name - Distro name
+#   --pretty-name - Distro pretty name
+# Returns:
+#   String
+#########################
+get_os_metadata() {
+    local -r flag_name="${1:?missing flag}"
+    # Helper function
+    get_os_release_metadata() {
+        local -r env_name="${1:?missing environment variable name}"
+        (
+            . /etc/os-release
+            echo "${!env_name}"
+        )
+    }
+    case "$flag_name" in
+        --id)
+            get_os_release_metadata ID
+            ;;
+        --version)
+            get_os_release_metadata VERSION_ID
+            ;;
+        --branch)
+            get_os_release_metadata VERSION_ID | sed 's/\..*//'
+            ;;
+        --codename)
+            get_os_release_metadata VERSION_CODENAME
+            ;;
+        --name)
+            get_os_release_metadata NAME
+            ;;
+        --pretty-name)
+            get_os_release_metadata PRETTY_NAME
+            ;;
+        *)
+            error "Unknown flag ${flag_name}"
+            return 1
+            ;;
+    esac
+}
+
+########################
+# Get total memory available
+# Arguments:
+#   None
+# Returns:
+#   Memory in megabytes
+#########################
+get_total_memory() {
+    echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
+}
+
+########################
+# Get machine size depending on specified memory
+# Globals:
+#   None
+# Arguments:
+#   None
+# Flags:
+#   --memory - memory size (optional)
+# Returns:
+#   Detected instance size
+#########################
+get_machine_size() {
+    local memory=""
+    # Validate arguments
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        --memory)
+            shift
+            memory="${1:?missing memory}"
+            ;;
+        *)
+            echo "Invalid command line flag $1" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+    if [[ -z "$memory" ]]; then
+        debug "Memory was not specified, detecting available memory automatically"
+        memory="$(get_total_memory)"
+    fi
+    sanitized_memory=$(convert_to_mb "$memory")
+    if [[ "$sanitized_memory" -gt 26000 ]]; then
+        echo 2xlarge
+    elif [[ "$sanitized_memory" -gt 13000 ]]; then
+        echo xlarge
+    elif [[ "$sanitized_memory" -gt 6000 ]]; then
+        echo large
+    elif [[ "$sanitized_memory" -gt 3000 ]]; then
+        echo medium
+    elif [[ "$sanitized_memory" -gt 1500 ]]; then
+        echo small
+    else
+        echo micro
+    fi
+}
+
+########################
+# List the machine sizes supported by 'get_machine_size'
+# Globals:
+#   None
+# Arguments:
+#   None
+# Returns:
+#   Supported machine sizes
+#########################
+get_supported_machine_sizes() {
+    echo micro small medium large xlarge 2xlarge
+}
+
+########################
+# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
+# Globals:
+#   None
+# Arguments:
+#   $1 - memory size
+# Returns:
+#   Result of the conversion
+#########################
+convert_to_mb() {
+    local amount="${1:-}"
+    if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
+        size="${BASH_REMATCH[1]}"
+        unit="${BASH_REMATCH[2]}"
+        if [[ "$unit" = "g" || "$unit" = "G" ]]; then
+            amount="$((size * 1024))"
+        else
+            amount="$size"
+        fi
+    fi
+    echo "$amount"
+}
+
+#########################
+# Redirects output to /dev/null if debug mode is disabled
+# Globals:
+#   BITNAMI_DEBUG
+# Arguments:
+#   $@ - Command to execute
+# Returns:
+#   None
+#########################
+debug_execute() {
+    if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then
+        "$@"
+    else
+        "$@" >/dev/null 2>&1
+    fi
+}
+
+########################
+# Retries a command a given number of times
+# Arguments:
+#   $1 - cmd (as a string)
+#   $2 - max retries. Default: 12
+#   $3 - sleep between retries (in seconds). Default: 5
+# Returns:
+#   Boolean
+#########################
+retry_while() {
+    local cmd="${1:?cmd is missing}"
+    local retries="${2:-12}"
+    local sleep_time="${3:-5}"
+    local return_value=1
+
+    read -r -a command <<<"$cmd"
+    for ((i = 1; i <= retries; i += 1)); do
+        "${command[@]}" && return_value=0 && break
+        sleep "$sleep_time"
+    done
+    return $return_value
+}
+
+########################
+# Generate a random string
+# Flags:
+#   -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
+#   -c|--count - Number of characters, defaults to 32
+# Returns:
+#   String
+#########################
+generate_random_string() {
+    local type="ascii"
+    local count="32"
+    local filter
+    local result
+    # Validate arguments
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        -t | --type)
+            shift
+            type="$1"
+            ;;
+        -c | --count)
+            shift
+            count="$1"
+            ;;
+        *)
+            echo "Invalid command line flag $1" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+    # Validate type
+    case "$type" in
+    ascii)
+        filter="[:print:]"
+        ;;
+    numeric)
+        filter="0-9"
+        ;;
+    alphanumeric)
+        filter="a-zA-Z0-9"
+        ;;
+    alphanumeric+special|special+alphanumeric)
+        # Limit variety of special characters, so there is a higher chance of containing more alphanumeric characters
+        # Special characters are harder to write, and it could impact the overall UX if most passwords are too complex
+        filter='a-zA-Z0-9:@.,/+!='
+        ;;
+    *)
+        echo "Invalid type ${type}" >&2
+        return 1
+        ;;
+    esac
+    # Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
+    # Note there is a very small chance of strings starting with EOL character
+    # Therefore, the more lines are read, the less frequently this will happen
+    result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
+    echo "$result"
+}
+
+########################
+# Create md5 hash from a string
+# Arguments:
+#   $1 - string
+# Returns:
+#   md5 hash - string
+#########################
+generate_md5_hash() {
+    local -r str="${1:?missing input string}"
+    echo -n "$str" | md5sum | awk '{print $1}'
+}
+
+########################
+# Create a SHA hash from a string
+# Arguments:
+#   $1 - string
+#   $2 - algorithm - 1 (default), 224, 256, 384, 512
+# Returns:
+#   SHA hash - string
+#########################
+generate_sha_hash() {
+    local -r str="${1:?missing input string}"
+    local -r algorithm="${2:-1}"
+    echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}'
+}
+
+########################
+# Converts a string to its hexadecimal representation
+# Arguments:
+#   $1 - string
+# Returns:
+#   hexadecimal representation of the string
+#########################
+convert_to_hex() {
+    local -r str=${1:?missing input string}
+    local -i iterator
+    local char
+    for ((iterator = 0; iterator < ${#str}; iterator++)); do
+        char=${str:iterator:1}
+        printf '%x' "'${char}"
+    done
+}
+
+########################
+# Get boot time
+# Globals:
+#   None
+# Arguments:
+#   None
+# Returns:
+#   Boot time metadata
+#########################
+get_boot_time() {
+    stat /proc --format=%Y
+}
+
+########################
+# Get machine ID
+# Globals:
+#   None
+# Arguments:
+#   None
+# Returns:
+#   Machine ID
+#########################
+get_machine_id() {
+    local machine_id
+    if [[ -f /etc/machine-id ]]; then
+        machine_id="$(cat /etc/machine-id)"
+    fi
+    if [[ -z "$machine_id" ]]; then
+        # Fallback to the boot-time, which will at least ensure a unique ID in the current session
+        machine_id="$(get_boot_time)"
+    fi
+    echo "$machine_id"
+}
+
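+# NOTE (editorial usage sketch, not part of the upstream script; the PID file
+# path below is illustrative): the helpers above compose like so:
+#   retry_while "test -f /opt/bitnami/tmp/app.pid" 12 5     # poll up to 12 times, 5s apart
+#   token="$(generate_random_string -t alphanumeric -c 16)" # 16-char random token
+#   checksum="$(generate_sha_hash "$token" 256)"            # sha256 digest of the token
+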
+######################## +# Get the root partition's disk device ID (e.g. /dev/sda1) +# Globals: +# None +# Arguments: +# None +# Returns: +# Root partition disk ID +######################### +get_disk_device_id() { + local device_id="" + if grep -q ^/dev /proc/mounts; then + device_id="$(grep ^/dev /proc/mounts | awk '$2 == "/" { print $1 }' | tail -1)" + fi + # If it could not be autodetected, fallback to /dev/sda1 as a default + if [[ -z "$device_id" || ! -b "$device_id" ]]; then + device_id="/dev/sda1" + fi + echo "$device_id" +} + +######################## +# Get the root disk device ID (e.g. /dev/sda) +# Globals: +# None +# Arguments: +# None +# Returns: +# Root disk ID +######################### +get_root_disk_device_id() { + get_disk_device_id | sed -E 's/p?[0-9]+$//' +} + +######################## +# Get the root disk size in bytes +# Globals: +# None +# Arguments: +# None +# Returns: +# Root disk size in bytes +######################### +get_root_disk_size() { + fdisk -l "$(get_root_disk_device_id)" | grep 'Disk.*bytes' | sed -E 's/.*, ([0-9]+) bytes,.*/\1/' || true +} + +######################## +# Run command as a specific user and group (optional) +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Returns: +# Exit code of the specified command +######################### +run_as_user() { + run_chroot "$@" +} + +######################## +# Execute command as a specific user and group (optional), +# replacing the current process image +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Returns: +# Exit code of the specified command +######################### +exec_as_user() { + run_chroot --replace-process "$@" +} + +######################## +# Run a command using chroot +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Flags: +# -r | --replace-process - Replace the current process image (optional) +# Returns: +# Exit code of the specified command +######################### +run_chroot() { + local userspec + local user + local homedir + local replace=false + local -r cwd="$(pwd)" + + # Parse and validate flags + while [[ "$#" -gt 0 ]]; do + case "$1" in + -r | --replace-process) + replace=true + ;; + --) + shift + break + ;; + -*) + stderr_print "unrecognized flag $1" + return 1 + ;; + *) + break + ;; + esac + shift + done + + # Parse and validate arguments + if [[ "$#" -lt 2 ]]; then + echo "expected at least 2 arguments" + return 1 + else + userspec=$1 + shift + + # userspec can optionally include the group, so we parse the user + user=$(echo "$userspec" | cut -d':' -f1) + fi + + if ! am_i_root; then + error "Could not switch to '${userspec}': Operation not permitted" + return 1 + fi + + # Get the HOME directory for the user to switch, as chroot does + # not properly update this env and some scripts rely on it + homedir=$(eval echo "~${user}") + if [[ ! 
-d $homedir ]]; then + homedir="${HOME:-/}" + fi + + # Obtaining value for "$@" indirectly in order to properly support shell parameter expansion + if [[ "$replace" = true ]]; then + exec chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@" + else + chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@" + fi +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh new file mode 100644 index 000000000000..af6af64d6dd0 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libpersistence.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami persistence library +# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libos.sh +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libversion.sh + +# Functions + +######################## +# Persist an application directory +# Globals: +# BITNAMI_ROOT_DIR +# BITNAMI_VOLUME_DIR +# Arguments: +# $1 - App folder name +# $2 - List of app files to persist +# Returns: +# true if all steps succeeded, false otherwise +######################### +persist_app() { + local -r app="${1:?missing app}" + local -a files_to_restore + read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")" + local -r install_dir="${BITNAMI_ROOT_DIR}/${app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + # Persist the individual files + if [[ "${#files_to_persist[@]}" -le 0 ]]; then + warn "No files are configured to be persisted" + return + fi + pushd "$install_dir" >/dev/null || exit + local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder + local -r tmp_file="/tmp/perms.acl" + for file_to_persist in "${files_to_persist[@]}"; do + if [[ ! -f "$file_to_persist" && ! 
-d "$file_to_persist" ]]; then + error "Cannot persist '${file_to_persist}' because it does not exist" + return 1 + fi + file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")" + file_to_persist_destination="${persist_dir}/${file_to_persist_relative}" + file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")" + # Get original permissions for existing files, which will be applied later + # Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume + getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file" + # Copy directories to the volume + ensure_dir_exists "$file_to_persist_destination_folder" + cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder" + # Restore permissions + pushd "$persist_dir" >/dev/null || exit + if am_i_root; then + setfacl --restore="$tmp_file" + else + # When running as non-root, don't change ownership + setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file") + fi + popd >/dev/null || exit + done + popd >/dev/null || exit + rm -f "$tmp_file" + # Install the persisted files into the installation directory, via symlinks + restore_persisted_app "$@" +} + +######################## +# Restore a persisted application directory +# Globals: +# BITNAMI_ROOT_DIR +# BITNAMI_VOLUME_DIR +# FORCE_MAJOR_UPGRADE +# Arguments: +# $1 - App folder name +# $2 - List of app files to restore +# Returns: +# true if all steps succeeded, false otherwise +######################### +restore_persisted_app() { + local -r app="${1:?missing app}" + local -a files_to_restore + read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")" + local -r install_dir="${BITNAMI_ROOT_DIR}/${app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + # Restore the individual persisted files + if [[ "${#files_to_restore[@]}" -le 0 ]]; then + warn "No persisted files are configured to be restored" + return + fi + local file_to_restore_relative file_to_restore_origin file_to_restore_destination + for file_to_restore in "${files_to_restore[@]}"; do + file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")" + # We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed + file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")" + file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")" + rm -rf "$file_to_restore_origin" + ln -sfn "$file_to_restore_destination" "$file_to_restore_origin" + done +} + +######################## +# Check if an application directory was already persisted +# Globals: +# BITNAMI_VOLUME_DIR +# Arguments: +# $1 - App folder name +# Returns: +# true if all steps succeeded, false otherwise +######################### +is_app_initialized() { + local -r app="${1:?missing app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + if ! is_mounted_dir_empty "$persist_dir"; then + true + else + false + fi +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh new file mode 100644 index 000000000000..107f54e6b5c9 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libservice.sh @@ -0,0 +1,496 @@ +#!/bin/bash +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0
+#
+# Library for managing services
+
+# shellcheck disable=SC1091
+
+# Load Generic Libraries
+. /opt/bitnami/scripts/libvalidations.sh
+. /opt/bitnami/scripts/liblog.sh
+
+# Functions
+
+########################
+# Read the provided PID file and return its PID
+# Arguments:
+#   $1 - Pid file
+# Returns:
+#   PID
+#########################
+get_pid_from_file() {
+    local pid_file="${1:?pid file is missing}"
+
+    if [[ -f "$pid_file" ]]; then
+        if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
+            echo "$(< "$pid_file")"
+        fi
+    fi
+}
+
+########################
+# Check if a provided PID corresponds to a running service
+# Arguments:
+#   $1 - PID
+# Returns:
+#   Boolean
+#########################
+is_service_running() {
+    local pid="${1:?pid is missing}"
+
+    kill -0 "$pid" 2>/dev/null
+}
+
+########################
+# Stop a service by sending a termination signal to its pid
+# Arguments:
+#   $1 - Pid file
+#   $2 - Signal number (optional)
+# Returns:
+#   None
+#########################
+stop_service_using_pid() {
+    local pid_file="${1:?pid file is missing}"
+    local signal="${2:-}"
+    local pid
+
+    pid="$(get_pid_from_file "$pid_file")"
+    # Return early when there is no PID to manage or the process is not running
+    [[ -z "$pid" ]] || ! is_service_running "$pid" && return
+
+    if [[ -n "$signal" ]]; then
+        kill "-${signal}" "$pid"
+    else
+        kill "$pid"
+    fi
+
+    local counter=10
+    while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
+        sleep 1
+        counter=$((counter - 1))
+    done
+}
+
+########################
+# Start cron daemon
+# Arguments:
+#   None
+# Returns:
+#   true if started correctly, false otherwise
+#########################
+cron_start() {
+    if [[ -x "/usr/sbin/cron" ]]; then
+        /usr/sbin/cron
+    elif [[ -x "/usr/sbin/crond" ]]; then
+        /usr/sbin/crond
+    else
+        false
+    fi
+}
+
+########################
+# Generate a cron configuration file for a given service
+# Arguments:
+#   $1 - Service name
+#   $2 - Command
+# Flags:
+#   --run-as - User to run as (default: root)
+#   --schedule - Cron schedule configuration (default: * * * * *)
+#   --no-clean - Append to the existing file instead of overwriting it
+# Returns:
+#   None
+#########################
+generate_cron_conf() {
+    local service_name="${1:?service name is missing}"
+    local cmd="${2:?command is missing}"
+    local run_as="root"
+    local schedule="* * * * *"
+    local clean="true"
+
+    # Parse optional CLI flags
+    shift 2
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        --run-as)
+            shift
+            run_as="$1"
+            ;;
+        --schedule)
+            shift
+            schedule="$1"
+            ;;
+        --no-clean)
+            clean="false"
+            ;;
+        *)
+            echo "Invalid command line flag ${1}" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    mkdir -p /etc/cron.d
+    if "$clean"; then
+        cat > "/etc/cron.d/${service_name}" <<EOF
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+${schedule} ${run_as} ${cmd}
+EOF
+    else
+        echo "${schedule} ${run_as} ${cmd}" >> /etc/cron.d/"$service_name"
+    fi
+}
+
+########################
+# Remove a cron configuration file for a given service
+# Arguments:
+#   $1 - Service name
+# Returns:
+#   None
+#########################
+remove_cron_conf() {
+    local service_name="${1:?service name is missing}"
+    local cron_conf_dir="/etc/cron.d"
+    rm -f "${cron_conf_dir}/${service_name}"
+}
+
+########################
+# Generate a monit configuration file for a given service
+# Arguments:
+#   $1 - Service name
+#   $2 - Pid file
+#   $3 - Start command
+#   $4 - Stop command
+# Flags:
+#   --disable - Whether to disable the monit configuration
+# Returns:
+#   None
+#########################
+generate_monit_conf() {
+    local service_name="${1:?service name is missing}"
+    local pid_file="${2:?pid file is missing}"
+    local start_command="${3:?start command is missing}"
+    local stop_command="${4:?stop command is missing}"
+    local monit_conf_dir="/etc/monit/conf.d"
+    local disabled="no"
+
+    # Parse optional CLI flags
+    shift 4
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        --disable)
+            disabled="yes"
+            ;;
+        *)
+            echo "Invalid command line flag ${1}" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    is_boolean_yes "$disabled" && conf_suffix=".disabled"
+    mkdir -p "$monit_conf_dir"
+    cat > "${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
+check process ${service_name}
+  with pidfile "${pid_file}"
+  start program = "${start_command}" with timeout 90 seconds
+  stop program = "${stop_command}" with timeout 90 seconds
+EOF
+}
+
+########################
+# Remove a monit configuration file for a given service
+# Arguments:
+#   $1 - Service name
+# Returns:
+#   None
+#########################
+remove_monit_conf() {
+    local service_name="${1:?service name is missing}"
+    local monit_conf_dir="/etc/monit/conf.d"
+    rm -f "${monit_conf_dir}/${service_name}.conf"
+}
+
+########################
+# Generate a logrotate configuration file
+# Arguments:
+#   $1 - Service name
+#   $2 - Log files pattern
+# Flags:
+#   --period - Period
+#   --rotations - Number of rotations to store
+#   --extra - Extra options (Optional)
+# Returns:
+#   None
+#########################
+generate_logrotate_conf() {
+    local service_name="${1:?service name is missing}"
+    local log_path="${2:?log path is missing}"
+    local period="weekly"
+    local rotations="150"
+    local extra=""
+    local logrotate_conf_dir="/etc/logrotate.d"
+    local var_name
+    # Parse optional CLI flags
+    shift 2
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        --period | --rotations | --extra)
+            var_name="$(echo "$1" | sed -e "s/^--//")"
+            shift
+            declare "$var_name"="${1:?"${var_name} is missing"}"
+            ;;
+        *)
+            echo "Invalid command line flag ${1}" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+
+    mkdir -p "$logrotate_conf_dir"
+    cat <<EOF > "${logrotate_conf_dir}/${service_name}"
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+${log_path} {
+  ${period}
+  rotate ${rotations}
+  dateext
+  compress
+  copytruncate
+  missingok
+$(indent "$extra" 2)
+}
+EOF
+}
+
+########################
+# Remove a logrotate configuration file
+# Arguments:
+#   $1 - Service name
+# Returns:
+#   None
+#########################
+remove_logrotate_conf() {
+    local service_name="${1:?service name is missing}"
+    local logrotate_conf_dir="/etc/logrotate.d"
+    rm -f "${logrotate_conf_dir}/${service_name}"
+}
+
+########################
+# Generate a Systemd configuration file
+# Arguments:
+#   $1 - Service name
+# Flags:
+#   --custom-service-content - Custom content to add to the [Service] block
+#   --environment - Environment variable to define (multiple --environment options may be passed)
+#   --environment-file - Text file with environment variables (multiple --environment-file options may be passed)
+#   --exec-start - Start command (required)
+#   --exec-start-pre - Pre-start command (optional)
+#   --exec-start-post - Post-start command (optional)
+#   --exec-stop - Stop command (optional)
+#   --exec-reload - Reload command (optional)
+#   --group - System group to start the service with
+#   --name - Service full name (e.g. Apache HTTP Server, defaults to $1)
+#   --restart - When to restart the Systemd service after being stopped (defaults to always)
+#   --pid-file - Service PID file
+#   --standard-output - File where to print stdout output
+#   --standard-error - File where to print stderr output
+#   --success-exit-status - Exit code that indicates a successful shutdown
+#   --type - Systemd unit type (defaults to forking)
+#   --user - System user to start the service with
+#   --working-directory - Working directory at which to start the service
+# Returns:
+#   None
+#########################
+generate_systemd_conf() {
+    local -r service_name="${1:?service name is missing}"
+    local -r systemd_units_dir="/etc/systemd/system"
+    local -r service_file="${systemd_units_dir}/bitnami.${service_name}.service"
+    # Default values
+    local name="$service_name"
+    local type="forking"
+    local user=""
+    local group=""
+    local environment=""
+    local environment_file=""
+    local exec_start=""
+    local exec_start_pre=""
+    local exec_start_post=""
+    local exec_stop=""
+    local exec_reload=""
+    local restart="always"
+    local pid_file=""
+    local standard_output="journal"
+    local standard_error=""
+    local limits_content=""
+    local success_exit_status=""
+    local custom_service_content=""
+    local working_directory=""
+    # Parse CLI flags
+    shift
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        --name \
+            | --type \
+            | --user \
+            | --group \
+            | --exec-start \
+            | --exec-stop \
+            | --exec-reload \
+            | --restart \
+            | --pid-file \
+            | --standard-output \
+            | --standard-error \
+            | --success-exit-status \
+            | --custom-service-content \
+            | --working-directory \
+            )
+            var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
+            shift
+            declare "$var_name"="${1:?"${var_name} value is missing"}"
+            ;;
+        --limit-*)
+            [[ -n "$limits_content" ]] && limits_content+=$'\n'
+            var_name="${1//--limit-}"
+            shift
+            limits_content+="Limit${var_name^^}=${1:?"--limit-${var_name} value is missing"}"
+            ;;
+        --exec-start-pre)
+            shift
+            [[ -n "$exec_start_pre" ]] && exec_start_pre+=$'\n'
+            exec_start_pre+="ExecStartPre=${1:?"--exec-start-pre value is missing"}"
+            ;;
+        --exec-start-post)
+            shift
+            [[ -n "$exec_start_post" ]] && exec_start_post+=$'\n'
+            exec_start_post+="ExecStartPost=${1:?"--exec-start-post value is missing"}"
+            ;;
+        --environment)
+            shift
+            # It is possible to add multiple environment lines
+            [[ -n "$environment" ]] && environment+=$'\n'
+            environment+="Environment=${1:?"--environment value is missing"}"
+            ;;
+        --environment-file)
+            shift
+            # It is possible to add multiple environment-file lines
+            [[ -n "$environment_file" ]] && environment_file+=$'\n'
+            environment_file+="EnvironmentFile=${1:?"--environment-file value is missing"}"
+            ;;
+        *)
+            echo "Invalid command line flag ${1}" >&2
+            return 1
+            ;;
+        esac
+        shift
+    done
+    # Validate inputs
+    local error="no"
+    if [[ -z "$exec_start" ]]; then
+        error "The --exec-start option is required"
+        error="yes"
+    fi
+    if [[ "$error" != "no" ]]; then
+        return 1
+    fi
+    # Generate the Systemd unit
+    cat > "$service_file" <<EOF
+[Unit]
+Description=Bitnami service for ${name}
+# Starting/stopping the main bitnami service should cause the same effect for this service
+PartOf=bitnami.service
+
+[Service]
+Type=${type}
+EOF
+    if [[ -n "$working_directory" ]]; then
+        cat >> "$service_file" <<< "WorkingDirectory=${working_directory}"
+    fi
+    if [[ -n "$exec_start_pre" ]]; then
+        # This variable may contain multiple ExecStartPre= directives
+        cat >> "$service_file" <<< "$exec_start_pre"
+    fi
+    if [[ -n "$exec_start" ]]; then
+        cat >> "$service_file" <<< "ExecStart=${exec_start}"
+    fi
+    if [[ -n "$exec_start_post" ]]; then
+        # This variable may contain multiple ExecStartPost= directives
+        cat >> "$service_file" <<< "$exec_start_post"
+    fi
+    # Optional stop and reload commands
+    if [[ -n "$exec_stop" ]]; then
+        cat >> "$service_file" <<< "ExecStop=${exec_stop}"
+    fi
+    if [[ -n "$exec_reload" ]]; then
+        cat >> "$service_file" <<< "ExecReload=${exec_reload}"
+    fi
+    # User and group
+    if [[ -n "$user" ]]; then
+        cat >> "$service_file" <<< "User=${user}"
+    fi
+    if [[ -n "$group" ]]; then
+        cat >> "$service_file" <<< "Group=${group}"
+    fi
+    # The PID file allows determining if the main process is running properly (for Restart=always)
+    if [[ -n "$pid_file" ]]; then
+        cat >> "$service_file" <<< "PIDFile=${pid_file}"
+    fi
+    if [[ -n "$restart" ]]; then
+        cat >> "$service_file" <<< "Restart=${restart}"
+    fi
+    # Environment flags
+    if [[ -n "$environment" ]]; then
+        # This variable may contain multiple Environment= directives
+        cat >> "$service_file" <<< "$environment"
+    fi
+    if [[ -n "$environment_file" ]]; then
+        # This variable may contain multiple EnvironmentFile= directives
+        cat >> "$service_file" <<< "$environment_file"
+    fi
+    # Logging
+    if [[ -n "$standard_output" ]]; then
+        cat >> "$service_file" <<< "StandardOutput=${standard_output}"
+    fi
+    if [[ -n "$standard_error" ]]; then
+        cat >> "$service_file" <<< "StandardError=${standard_error}"
+    fi
+    if [[ -n "$custom_service_content" ]]; then
+        # This variable may contain multiple miscellaneous directives
+        cat >> "$service_file" <<< "$custom_service_content"
+    fi
+    if [[ -n "$success_exit_status" ]]; then
+        cat >> "$service_file" <<EOF
+SuccessExitStatus=${success_exit_status}
+EOF
+    fi
+    cat >> "$service_file" <<EOF
+# Optimizations
+TimeoutStartSec=2min
+TimeoutStopSec=30s
+IgnoreSIGPIPE=no
+KillMode=mixed
+EOF
+    if [[ -n "$limits_content" ]]; then
+        cat >> "$service_file" <<EOF
+# Limits
+${limits_content}
+EOF
+    fi
+    cat >> "$service_file" <<EOF
+
+[Install]
+# Enabling/disabling the main bitnami service should cause the same effect for this service
+WantedBy=bitnami.service
+EOF
+} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libvalidations.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libvalidations.sh
new file mode 100644
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libvalidations.sh
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+#
+# Validation functions library
+
+# shellcheck disable=SC1091
+
+# Load Generic Libraries
+. /opt/bitnami/scripts/liblog.sh
+
+# Functions
+
+########################
+# Check if the provided argument is an integer
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_int() {
+    local -r int="${1:-}"
+    if [[ "$int" =~ ^-?[0-9]+$ ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is a positive integer
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_positive_int() {
+    local -r int="${1:-}"
+    if is_int "$int" && (( int >= 0 )); then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is a boolean or is the string 'yes/true'
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_boolean_yes() {
+    local -r bool="${1:-}"
+    # comparison is performed without regard to the case of alphabetic characters
+    shopt -s nocasematch
+    if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is a boolean yes/no value
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_yes_no_value() {
+    local -r bool="${1:-}"
+    if [[ "$bool" =~ ^(yes|no)$ ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is a boolean true/false value
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_true_false_value() {
+    local -r bool="${1:-}"
+    if [[ "$bool" =~ ^(true|false)$ ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is a boolean 1/0 value
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_1_0_value() {
+    local -r bool="${1:-}"
+    if [[ "$bool" =~ ^[10]$ ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Check if the provided argument is an empty string or not defined
+# Arguments:
+#   $1 - Value to check
+# Returns:
+#   Boolean
+#########################
+is_empty_value() {
+    local -r val="${1:-}"
+    if [[ -z "$val" ]]; then
+        true
+    else
+        false
+    fi
+}
+
+########################
+# Validate if the provided argument is a valid port
+# Arguments:
+#   $1 - Port to validate
+# Returns:
+#   Boolean and error message
+#########################
+validate_port() {
+    local value
+    local unprivileged=0
+
+    # Parse flags
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+        -unprivileged)
+            unprivileged=1
+            ;;
+        --)
+            shift
+            break
+            ;;
+        -*)
+            stderr_print "unrecognized flag $1"
+            return 1
+            ;;
+        *)
+            break
+            ;;
+        esac
+        shift
+    done
+
+    if [[ "$#" -gt 1 ]]; then
+        echo "too many arguments provided"
+        return 2
+    elif [[ "$#" -eq 0 ]]; then
+        stderr_print "missing port argument"
+        return 1
+    else
+        value=$1
+    fi
+
+    if [[ -z "$value" ]]; then
+        echo "the value is empty"
+        return 1
+    else
+        if ! is_int "$value"; then
+            echo "value is not an integer"
+            return 2
+        elif [[ "$value" -lt 0 ]]; then
+            echo "negative value provided"
+            return 2
+        elif [[ "$value" -gt 65535 ]]; then
+            echo "requested port is greater than 65535"
+            return 2
+        elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
+            echo "privileged port requested"
+            return 3
+        fi
+    fi
+}
+
+########################
+# Validate if the provided argument is a valid IPv6 address
+# Arguments:
+#   $1 - IP to validate
+# Returns:
+#   Boolean
+#########################
+validate_ipv6() {
+    local ip="${1:?ip is missing}"
+    local stat=1
+    local full_address_regex='^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$'
+    local short_address_regex='^((([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}){0,6}::(([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}){0,6})$'
+
+    if [[ $ip =~ $full_address_regex || $ip =~ $short_address_regex || $ip == "::" ]]; then
+        stat=0
+    fi
+    return $stat
+}
+
+########################
+# Validate if the provided argument is a valid IPv4 address
+# Arguments:
+#   $1 - IP to validate
+# Returns:
+#   Boolean
+#########################
+validate_ipv4() {
+    local ip="${1:?ip is missing}"
+    local stat=1
+
+    if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
+        read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
+        [[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
+            && ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
+        stat=$?
+    fi
+    return $stat
+}
+
+########################
+# Validate if the provided argument is a valid IPv4 or IPv6 address
+# Arguments:
+#   $1 - IP to validate
+# Returns:
+#   Boolean
+#########################
+validate_ip() {
+    local ip="${1:?ip is missing}"
+    local stat=1
+
+    if validate_ipv4 "$ip"; then
+        stat=0
+    else
+        # Capture the exit code (not the output) of the IPv6 check
+        validate_ipv6 "$ip"
+        stat=$?
+    fi
+    return $stat
+}
+
+########################
+# Validate a string format
+# Arguments:
+#   $1 - String to validate
+# Returns:
+#   Boolean
+#########################
+validate_string() {
+    local string
+    local min_length=-1
+    local max_length=-1
+
+    # Parse flags
+    while [ "$#" -gt 0 ]; do
+        case "$1" in
+        -min-length)
+            shift
+            min_length=${1:-}
+            ;;
+        -max-length)
+            shift
+            max_length=${1:-}
+            ;;
+        --)
+            shift
+            break
+            ;;
+        -*)
+            stderr_print "unrecognized flag $1"
+            return 1
+            ;;
+        *)
+            break
+            ;;
+        esac
+        shift
+    done
+
+    if [ "$#" -gt 1 ]; then
+        stderr_print "too many arguments provided"
+        return 2
+    elif [ "$#" -eq 0 ]; then
+        stderr_print "missing string"
+        return 1
+    else
+        string=$1
+    fi
+
+    if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
+        echo "string length is less than $min_length"
+        return 1
+    fi
+    if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
+        echo "string length is greater than $max_length"
+        return 1
+    fi
+} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh
new file mode 100644
index 000000000000..6ca71ac7bdbb
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libversion.sh
@@ -0,0 +1,51 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing versions strings + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh + +# Functions +######################## +# Gets semantic version +# Arguments: +# $1 - version: string to extract major.minor.patch +# $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch +# Returns: +# array with the major, minor and release +######################### +get_sematic_version () { + local version="${1:?version is required}" + local section="${2:?section is required}" + local -a version_sections + + #Regex to parse versions: x.y.z + local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?' + + if [[ "$version" =~ $regex ]]; then + local i=1 + local j=1 + local n=${#BASH_REMATCH[*]} + + while [[ $i -lt $n ]]; do + if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then + version_sections[j]="${BASH_REMATCH[$i]}" + ((j++)) + fi + ((i++)) + done + + local number_regex='^[0-9]+$' + if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then + echo "${version_sections[$section]}" + return + else + stderr_print "Section allowed values are: 1, 2, and 3" + return 1 + fi + fi +} diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh new file mode 100644 index 000000000000..8023f9b0549a --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/prebuildfs/opt/bitnami/scripts/libwebserver.sh @@ -0,0 +1,476 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami web server handler library + +# shellcheck disable=SC1090,SC1091 + +# Load generic libraries +. /opt/bitnami/scripts/liblog.sh + +######################## +# Execute a command (or list of commands) with the web server environment and library loaded +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_execute() { + local -r web_server="${1:?missing web server}" + shift + # Run program in sub-shell to avoid web server environment getting loaded when not necessary + ( + . "/opt/bitnami/scripts/lib${web_server}.sh" + . "/opt/bitnami/scripts/${web_server}-env.sh" + "$@" + ) +} + +######################## +# Prints the list of enabled web servers +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_list() { + local -r -a supported_web_servers=(apache nginx) + local -a existing_web_servers=() + for web_server in "${supported_web_servers[@]}"; do + [[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server") + done + echo "${existing_web_servers[@]:-}" +} + +######################## +# Prints the currently-enabled web server type (only one, in order of preference) +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_type() { + local -a web_servers + read -r -a web_servers <<< "$(web_server_list)" + echo "${web_servers[0]:-}" +} + +######################## +# Validate that a supported web server is configured +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_validate() { + local error_code=0 + local supported_web_servers=("apache" "nginx") + + # Auxiliary functions + print_validation_error() { + error "$1" + error_code=1 + } + + if [[ -z "$(web_server_type)" || ! 
" ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then + print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}" + elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then + print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable." + fi + + return "$error_code" +} + +######################## +# Check whether the web server is running +# Globals: +# * +# Arguments: +# None +# Returns: +# true if the web server is running, false otherwise +######################### +is_web_server_running() { + "is_$(web_server_type)_running" +} + +######################## +# Start web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_start() { + info "Starting $(web_server_type) in background" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl start "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh" + fi +} + +######################## +# Stop web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_stop() { + info "Stopping $(web_server_type)" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl stop "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh" + fi +} + +######################## +# Restart web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_restart() { + info "Restarting $(web_server_type)" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl restart "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh" + fi +} + +######################## +# Reload web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_reload() { + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl reload "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh" + fi +} + +######################## +# Ensure a web server application configuration exists (i.e. 
Apache virtual host format or NGINX server block) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --type - Application type, which has an effect on which configuration template to use +# --hosts - Host listen addresses +# --server-name - Server name +# --server-aliases - Server aliases +# --allow-remote-connections - Whether to allow remote connections or to require local connections +# --disable - Whether to render server configurations with a .disabled prefix +# --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix +# --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix +# --http-port - HTTP port number +# --https-port - HTTPS port number +# --document-root - Path to document root directory +# Apache-specific flags: +# --apache-additional-configuration - Additional vhost configuration (no default) +# --apache-additional-http-configuration - Additional HTTP vhost configuration (no default) +# --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default) +# --apache-before-vhost-configuration - Configuration to add before the directive (no default) +# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined) +# --apache-extra-directory-configuration - Extra configuration for the document root directory +# --apache-proxy-address - Address where to proxy requests +# --apache-proxy-configuration - Extra configuration for the proxy +# --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost +# --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost +# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup (only allowed when type is not defined) +# NGINX-specific flags: +# --nginx-additional-configuration - Additional server block configuration (no default) +# --nginx-external-configuration - Configuration external to server block (no default) +# Returns: +# true if the configuration was enabled, false otherwise +######################## +ensure_web_server_app_configuration_exists() { + local app="${1:?missing app}" + shift + local -a apache_args nginx_args web_servers args_var + apache_args=("$app") + nginx_args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --disable \ + | --disable-http \ + | --disable-https \ + ) + apache_args+=("$1") + nginx_args+=("$1") + ;; + --hosts \ + | --server-name \ + | --server-aliases \ + | --type \ + | --allow-remote-connections \ + | --http-port \ + | --https-port \ + | --document-root \ + ) + apache_args+=("$1" "${2:?missing value}") + nginx_args+=("$1" "${2:?missing value}") + shift + ;; + + # Specific Apache flags + --apache-additional-configuration \ + | --apache-additional-http-configuration \ + | --apache-additional-https-configuration \ + | --apache-before-vhost-configuration \ + | --apache-allow-override \ + | --apache-extra-directory-configuration \ + | --apache-proxy-address \ + | --apache-proxy-configuration \ + | --apache-proxy-http-configuration \ + | --apache-proxy-https-configuration \ + | --apache-move-htaccess \ + ) + apache_args+=("${1//apache-/}" "${2:?missing value}") + shift + ;; + + # Specific NGINX flags + --nginx-additional-configuration \ + | --nginx-external-configuration) + nginx_args+=("${1//nginx-/}" "${2:?missing 
value}") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + args_var="${web_server}_args[@]" + web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}" + done +} + +######################## +# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Returns: +# true if the configuration was disabled, false otherwise +######################## +ensure_web_server_app_configuration_not_exists() { + local app="${1:?missing app}" + local -a web_servers + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app" + done +} + +######################## +# Ensure the web server loads the configuration for an application in a URL prefix +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --allow-remote-connections - Whether to allow remote connections or to require local connections +# --document-root - Path to document root directory +# --prefix - URL prefix from where it will be accessible (i.e. /myapp) +# --type - Application type, which has an effect on what configuration template will be used +# Apache-specific flags: +# --apache-additional-configuration - Additional vhost configuration (no default) +# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no') +# --apache-extra-directory-configuration - Extra configuration for the document root directory +# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup +# NGINX-specific flags: +# --nginx-additional-configuration - Additional server block configuration (no default) +# Returns: +# true if the configuration was enabled, false otherwise +######################## +ensure_web_server_prefix_configuration_exists() { + local app="${1:?missing app}" + shift + local -a apache_args nginx_args web_servers args_var + apache_args=("$app") + nginx_args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --allow-remote-connections \ + | --document-root \ + | --prefix \ + | --type \ + ) + apache_args+=("$1" "${2:?missing value}") + nginx_args+=("$1" "${2:?missing value}") + shift + ;; + + # Specific Apache flags + --apache-additional-configuration \ + | --apache-allow-override \ + | --apache-extra-directory-configuration \ + | --apache-move-htaccess \ + ) + apache_args+=("${1//apache-/}" "$2") + shift + ;; + + # Specific NGINX flags + --nginx-additional-configuration) + nginx_args+=("${1//nginx-/}" "$2") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + args_var="${web_server}_args[@]" + web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}" + done +} + +######################## +# Ensure a web server application configuration is updated with the runtime configuration (i.e. 
ports)
+# It serves as a wrapper for the specific web server function
+# Globals:
+#   *
+# Arguments:
+#   $1 - App name
+# Flags:
+#   --hosts - Host listen addresses
+#   --server-name - Server name
+#   --server-aliases - Server aliases
+#   --enable-http - Enable HTTP app configuration (if not enabled already)
+#   --enable-https - Enable HTTPS app configuration (if not enabled already)
+#   --disable-http - Disable HTTP app configuration (if not disabled already)
+#   --disable-https - Disable HTTPS app configuration (if not disabled already)
+#   --http-port - HTTP port number
+#   --https-port - HTTPS port number
+# Returns:
+#   true if the configuration was updated, false otherwise
+########################
+web_server_update_app_configuration() {
+    local app="${1:?missing app}"
+    shift
+    local -a args web_servers
+    args=("$app")
+    # Validate arguments
+    while [[ "$#" -gt 0 ]]; do
+        case "$1" in
+            # Common flags
+            --enable-http \
+            | --enable-https \
+            | --disable-http \
+            | --disable-https \
+            )
+                args+=("$1")
+                ;;
+            --hosts \
+            | --server-name \
+            | --server-aliases \
+            | --http-port \
+            | --https-port \
+            )
+                args+=("$1" "${2:?missing value}")
+                shift
+                ;;
+
+            *)
+                echo "Invalid command line flag $1" >&2
+                return 1
+                ;;
+        esac
+        shift
+    done
+    read -r -a web_servers <<< "$(web_server_list)"
+    for web_server in "${web_servers[@]}"; do
+        web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}"
+    done
+}
+
+########################
+# Enable loading page, which shows users that the initialization process is not yet completed
+# Globals:
+#   *
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+web_server_enable_loading_page() {
+    ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \
+        --apache-additional-configuration "
+# Show an HTTP 503 Service Unavailable page by default
+RedirectMatch 503 ^/$
+# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
+ErrorDocument 404 /index.html
+ErrorDocument 503 /index.html" \
+        --nginx-additional-configuration "
+# Show an HTTP 503 Service Unavailable page by default
+location / {
+    return 503;
+}
+# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
+error_page 404 @installing;
+error_page 503 @installing;
+location @installing {
+    rewrite ^(.*)$ /index.html break;
+}"
+    web_server_reload
+}
+
+########################
+# Disable the loading page that shows users that the initialization process is not yet completed
+# Globals:
+#   *
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+web_server_disable_install_page() {
+    ensure_web_server_app_configuration_not_exists "__loading"
+    web_server_reload
+}
diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/install_packages b/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/install_packages
new file mode 100755
index 000000000000..acbc3173208c
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/install_packages
@@ -0,0 +1,27 @@
+#!/bin/sh
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+set -eu
+
+n=0
+max=2
+export DEBIAN_FRONTEND=noninteractive
+
+until [ $n -gt $max ]; do
+    set +e
+    (
+        apt-get update -qq &&
+        apt-get install -y --no-install-recommends "$@"
+    )
+    CODE=$?
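+    # 'set +e' above lets the apt subshell fail without aborting the script
+    # (which runs under 'set -eu'); the exit status is captured in CODE and
+    # 'set -e' is restored below, so a non-zero CODE triggers a retry: with
+    # max=2 this gives one initial attempt plus up to two retries.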
+    set -e
+    if [ $CODE -eq 0 ]; then
+        break
+    fi
+    if [ $n -eq $max ]; then
+        exit $CODE
+    fi
+    echo "apt failed, retrying"
+    n=$(($n + 1))
+done
+apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives
diff --git a/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/run-script b/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/run-script
new file mode 100755
index 000000000000..4ca0f897277e
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/prebuildfs/usr/sbin/run-script
@@ -0,0 +1,24 @@
+#!/bin/sh
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+set -u
+
+if [ $# -eq 0 ]; then
+    >&2 echo "No arguments provided"
+    exit 1
+fi
+
+script=$1
+exit_code="${2:-96}"
+fail_if_not_present="${3:-n}"
+
+if test -f "$script"; then
+    sh "$script"
+
+    if [ $? -ne 0 ]; then
+        exit $((exit_code))
+    fi
+elif [ "$fail_if_not_present" = "y" ]; then
+    >&2 echo "script not found: $script"
+    exit 127
+fi
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh
new file mode 100755
index 000000000000..c3a1e2383fa1
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/entrypoint.sh
@@ -0,0 +1,19 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+# shellcheck disable=SC1091
+
+set -o errexit
+set -o nounset
+set -o pipefail
+# set -o xtrace # Uncomment this line for debugging purposes
+
+# Load libraries
+. /opt/bitnami/scripts/libbitnami.sh
+. /opt/bitnami/scripts/liblog.sh
+
+print_welcome_page
+
+echo ""
+exec "$@"
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh
new file mode 100755
index 000000000000..52dbf4f13673
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/java/postunpack.sh
@@ -0,0 +1,26 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+# shellcheck disable=SC1091
+
+set -o errexit
+set -o nounset
+set -o pipefail
+# set -o xtrace # Uncomment this line for debugging purposes
+
+# Load libraries
+. /opt/bitnami/scripts/libfile.sh
+. /opt/bitnami/scripts/liblog.sh
+
+#
+# Java post-unpack operations
+#
+
+# Override default files in the Java security directory. This is used for
+# custom base images (e.g. when custom CA certificates or block lists are used)
+
+if [[ -n "${JAVA_EXTRA_SECURITY_DIR:-}" ]] && ! is_dir_empty "$JAVA_EXTRA_SECURITY_DIR"; then
+    info "Adding custom CAs to the Java security folder"
+    cp -Lr "${JAVA_EXTRA_SECURITY_DIR}/." /opt/bitnami/java/lib/security
+fi
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka-env.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka-env.sh
new file mode 100644
index 000000000000..9f33fc07871a
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka-env.sh
@@ -0,0 +1,117 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+#
+# Environment configuration for kafka
+
+# The values for all environment variables will be set in the below order of precedence
+# 1. Custom environment variables defined below after Bitnami defaults
+# 2. Constants defined in this file (environment variables with no default), i.e. BITNAMI_ROOT_DIR
+# 3. Environment variables overridden via external files using *_FILE variables (see below)
+# 4. Environment variables set externally (i.e.
current Bash context/Dockerfile/userdata) + +# Load logging library +# shellcheck disable=SC1090,SC1091 +. /opt/bitnami/scripts/liblog.sh + +export BITNAMI_ROOT_DIR="/opt/bitnami" +export BITNAMI_VOLUME_DIR="/bitnami" + +# Logging configuration +export MODULE="${MODULE:-kafka}" +export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}" + +# By setting an environment variable matching *_FILE to a file path, the prefixed environment +# variable will be overridden with the value specified in that file +kafka_env_vars=( + KAFKA_MOUNTED_CONF_DIR + KAFKA_INTER_BROKER_USER + KAFKA_INTER_BROKER_PASSWORD + KAFKA_CONTROLLER_USER + KAFKA_CONTROLLER_PASSWORD + KAFKA_CERTIFICATE_PASSWORD + KAFKA_TLS_TRUSTSTORE_FILE + KAFKA_TLS_TYPE + KAFKA_TLS_CLIENT_AUTH + KAFKA_OPTS + KAFKA_CFG_SASL_ENABLED_MECHANISMS + KAFKA_KRAFT_CLUSTER_ID + KAFKA_SKIP_KRAFT_STORAGE_INIT + KAFKA_CLIENT_LISTENER_NAME + KAFKA_ZOOKEEPER_PROTOCOL + KAFKA_ZOOKEEPER_PASSWORD + KAFKA_ZOOKEEPER_USER + KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD + KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD + KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE + KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME + KAFKA_ZOOKEEPER_TLS_TYPE + KAFKA_CLIENT_USERS + KAFKA_CLIENT_PASSWORDS + KAFKA_HEAP_OPTS +) +for env_var in "${kafka_env_vars[@]}"; do + file_env_var="${env_var}_FILE" + if [[ -n "${!file_env_var:-}" ]]; then + if [[ -r "${!file_env_var:-}" ]]; then + export "${env_var}=$(< "${!file_env_var}")" + unset "${file_env_var}" + else + warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable." + fi + fi +done +unset kafka_env_vars + +# Paths +export KAFKA_BASE_DIR="${BITNAMI_ROOT_DIR}/kafka" +export KAFKA_VOLUME_DIR="/bitnami/kafka" +export KAFKA_DATA_DIR="${KAFKA_VOLUME_DIR}/data" +export KAFKA_CONF_DIR="${KAFKA_BASE_DIR}/config" +export KAFKA_CONF_FILE="${KAFKA_CONF_DIR}/server.properties" +export KAFKA_MOUNTED_CONF_DIR="${KAFKA_MOUNTED_CONF_DIR:-${KAFKA_VOLUME_DIR}/config}" +export KAFKA_CERTS_DIR="${KAFKA_CONF_DIR}/certs" +export KAFKA_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d" +export KAFKA_LOG_DIR="${KAFKA_BASE_DIR}/logs" +export KAFKA_HOME="$KAFKA_BASE_DIR" +export PATH="${KAFKA_BASE_DIR}/bin:${BITNAMI_ROOT_DIR}/java/bin:${PATH}" + +# System users (when running with a privileged user) +export KAFKA_DAEMON_USER="kafka" +export KAFKA_DAEMON_GROUP="kafka" + +# Kafka runtime settings +export KAFKA_INTER_BROKER_USER="${KAFKA_INTER_BROKER_USER:-user}" +export KAFKA_INTER_BROKER_PASSWORD="${KAFKA_INTER_BROKER_PASSWORD:-bitnami}" +export KAFKA_CONTROLLER_USER="${KAFKA_CONTROLLER_USER:-controller_user}" +export KAFKA_CONTROLLER_PASSWORD="${KAFKA_CONTROLLER_PASSWORD:-bitnami}" +export KAFKA_CERTIFICATE_PASSWORD="${KAFKA_CERTIFICATE_PASSWORD:-}" +export KAFKA_TLS_TRUSTSTORE_FILE="${KAFKA_TLS_TRUSTSTORE_FILE:-}" +export KAFKA_TLS_TYPE="${KAFKA_TLS_TYPE:-JKS}" +export KAFKA_TLS_CLIENT_AUTH="${KAFKA_TLS_CLIENT_AUTH:-required}" +export KAFKA_OPTS="${KAFKA_OPTS:-}" + +# Kafka configuration overrides +export KAFKA_CFG_SASL_ENABLED_MECHANISMS="${KAFKA_CFG_SASL_ENABLED_MECHANISMS:-PLAIN,SCRAM-SHA-256,SCRAM-SHA-512}" +export KAFKA_KRAFT_CLUSTER_ID="${KAFKA_KRAFT_CLUSTER_ID:-}" +export KAFKA_SKIP_KRAFT_STORAGE_INIT="${KAFKA_SKIP_KRAFT_STORAGE_INIT:-false}" +export KAFKA_CLIENT_LISTENER_NAME="${KAFKA_CLIENT_LISTENER_NAME:-}" + +# ZooKeeper connection settings +export KAFKA_ZOOKEEPER_PROTOCOL="${KAFKA_ZOOKEEPER_PROTOCOL:-PLAINTEXT}" +export KAFKA_ZOOKEEPER_PASSWORD="${KAFKA_ZOOKEEPER_PASSWORD:-}" +export KAFKA_ZOOKEEPER_USER="${KAFKA_ZOOKEEPER_USER:-}" +export 
KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD="${KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD:-}" +export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD="${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD:-}" +export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE:-}" +export KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME="${KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME:-true}" +export KAFKA_ZOOKEEPER_TLS_TYPE="${KAFKA_ZOOKEEPER_TLS_TYPE:-JKS}" + +# Authentication +export KAFKA_CLIENT_USERS="${KAFKA_CLIENT_USERS:-user}" +export KAFKA_CLIENT_PASSWORDS="${KAFKA_CLIENT_PASSWORDS:-bitnami}" + +# Java settings +export KAFKA_HEAP_OPTS="${KAFKA_HEAP_OPTS:--Xmx1024m -Xms1024m}" + +# Custom environment variables may be defined below diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/entrypoint.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/entrypoint.sh new file mode 100755 index 000000000000..deafe6d4660a --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/entrypoint.sh @@ -0,0 +1,29 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libbitnami.sh +. /opt/bitnami/scripts/libkafka.sh + +# Load Kafka environment variables +. /opt/bitnami/scripts/kafka-env.sh + +print_welcome_page + +if [[ "$*" = *"/opt/bitnami/scripts/kafka/run.sh"* || "$*" = *"/run.sh"* ]]; then + info "** Starting Kafka setup **" + /opt/bitnami/scripts/kafka/setup.sh + info "** Kafka setup finished! **" +fi + +echo "" +exec "$@" diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/postunpack.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/postunpack.sh new file mode 100755 index 000000000000..b6526959daf7 --- /dev/null +++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/postunpack.sh @@ -0,0 +1,46 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. /opt/bitnami/scripts/libkafka.sh +. /opt/bitnami/scripts/libfs.sh + +# Load Kafka environment variables +. 
/opt/bitnami/scripts/kafka-env.sh
+
+# Move server.properties from configtmp to config
+# Temporary solution until the kafka tarball places server.properties into config
+if [[ -d "${KAFKA_BASE_DIR}/configtmp" ]]; then
+    mv "${KAFKA_BASE_DIR}/configtmp"/* "$KAFKA_CONF_DIR"
+    rmdir "${KAFKA_BASE_DIR}/configtmp"
+fi
+[[ -d "${KAFKA_BASE_DIR}/conf" ]] && rmdir "${KAFKA_BASE_DIR}/conf"
+
+# Ensure directories used by Kafka exist and have proper ownership and permissions
+for dir in "$KAFKA_LOG_DIR" "$KAFKA_CONF_DIR" "$KAFKA_MOUNTED_CONF_DIR" "$KAFKA_VOLUME_DIR" "$KAFKA_DATA_DIR" "$KAFKA_INITSCRIPTS_DIR"; do
+    ensure_dir_exists "$dir"
+done
+chmod -R g+rwX "$KAFKA_BASE_DIR" "$KAFKA_VOLUME_DIR" "$KAFKA_DATA_DIR" "$KAFKA_INITSCRIPTS_DIR"
+
+# Move the original server.properties, so users can skip initialization logic by mounting their own server.properties directly instead of using the MOUNTED_CONF_DIR
+mv "${KAFKA_CONF_DIR}/server.properties" "${KAFKA_CONF_DIR}/server.properties.original"
+
+# Disable garbage collection logging and replace the daily rolling file appenders with console appenders
+# Source: https://logging.apache.org/log4j/log4j-2.4/manual/appenders.html
+replace_in_file "${KAFKA_BASE_DIR}/bin/kafka-server-start.sh" " [-]loggc" " "
+replace_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DailyRollingFileAppender" "ConsoleAppender"
+
+# Disable the default console logger in favour of KafkaAppender (which provides the same output)
+echo "log4j.appender.stdout.Threshold=OFF" >>"${KAFKA_CONF_DIR}/log4j.properties"
+
+# Remove invalid parameters for ConsoleAppender
+remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "DatePattern"
+remove_in_file "${KAFKA_CONF_DIR}/log4j.properties" "Appender.File"
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/run.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/run.sh
new file mode 100755
index 000000000000..a82f26867e70
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/run.sh
@@ -0,0 +1,32 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+# shellcheck disable=SC1091
+
+set -o errexit
+set -o nounset
+set -o pipefail
+# set -o xtrace # Uncomment this line for debugging purposes
+
+# Load libraries
+. /opt/bitnami/scripts/libkafka.sh
+. /opt/bitnami/scripts/libos.sh
+
+# Load Kafka environment variables
+. /opt/bitnami/scripts/kafka-env.sh
+
+if [[ -f "${KAFKA_CONF_DIR}/kafka_jaas.conf" ]]; then
+    export KAFKA_OPTS="-Djava.security.auth.login.config=${KAFKA_CONF_DIR}/kafka_jaas.conf"
+fi
+
+cmd="$KAFKA_HOME/bin/kafka-server-start.sh"
+args=("$KAFKA_CONF_FILE")
+! is_empty_value "${KAFKA_EXTRA_FLAGS:-}" && args=("${args[@]}" "${KAFKA_EXTRA_FLAGS[@]}")
+
+info "** Starting Kafka **"
+if am_i_root; then
+    exec_as_user "$KAFKA_DAEMON_USER" "$cmd" "${args[@]}" "$@"
+else
+    exec "$cmd" "${args[@]}" "$@"
+fi
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/setup.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/setup.sh
new file mode 100755
index 000000000000..a1dcc1d2d162
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/kafka/setup.sh
@@ -0,0 +1,60 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+
+# shellcheck disable=SC1091
+
+set -o errexit
+set -o nounset
+set -o pipefail
+# set -o xtrace # Uncomment this line for debugging purposes
+
+# Load libraries
+. /opt/bitnami/scripts/libfs.sh
+. /opt/bitnami/scripts/libos.sh
+. /opt/bitnami/scripts/libkafka.sh
+
+# Load Kafka environment variables
+.
/opt/bitnami/scripts/kafka-env.sh
+
+# Map Kafka environment variables
+kafka_create_alias_environment_variables
+
+# Dynamically set node.id/broker.id/controller.quorum.voters if the _COMMAND environment variable is set
+kafka_dynamic_environment_variables
+
+# Set the default truststore locations before validation
+kafka_configure_default_truststore_locations
+# Ensure Kafka user and group exist when running as 'root'
+am_i_root && ensure_user_exists "$KAFKA_DAEMON_USER" --group "$KAFKA_DAEMON_GROUP"
+# Ensure directories used by Kafka exist and have proper ownership and permissions
+for dir in "$KAFKA_LOG_DIR" "$KAFKA_CONF_DIR" "$KAFKA_MOUNTED_CONF_DIR" "$KAFKA_VOLUME_DIR" "$KAFKA_DATA_DIR"; do
+    if am_i_root; then
+        ensure_dir_exists "$dir" "$KAFKA_DAEMON_USER" "$KAFKA_DAEMON_GROUP"
+    else
+        ensure_dir_exists "$dir"
+    fi
+done
+
+# Kafka validation, skipped if server.properties was mounted at either $KAFKA_MOUNTED_CONF_DIR or $KAFKA_CONF_DIR
+[[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/server.properties" && ! -f "$KAFKA_CONF_FILE" ]] && kafka_validate
+# Kafka initialization, skipped if server.properties was mounted at $KAFKA_CONF_DIR
+[[ ! -f "$KAFKA_CONF_FILE" ]] && kafka_initialize
+
+# Initialise KRaft metadata storage if process.roles is configured
+if grep -q "^process.roles=" "$KAFKA_CONF_FILE" && ! is_boolean_yes "$KAFKA_SKIP_KRAFT_STORAGE_INIT" ; then
+    kafka_kraft_storage_initialize
+fi
+# Configure Zookeeper SCRAM users
+if is_boolean_yes "${KAFKA_ZOOKEEPER_BOOTSTRAP_SCRAM_USERS:-}"; then
+    kafka_zookeeper_create_sasl_scram_users
+fi
+# KRaft controllers may get stuck starting when the controller quorum voters are changed.
+# Workaround: Remove the quorum-state file when scaling controllers up/down (pending proposal KIP-853)
+# https://cwiki.apache.org/confluence/display/KAFKA/KIP-853%3A+KRaft+Voter+Changes
+if [[ -f "${KAFKA_DATA_DIR}/__cluster_metadata-0/quorum-state" ]] && grep -q "^controller.quorum.voters=" "$KAFKA_CONF_FILE" && kafka_kraft_quorum_voters_changed; then
+    warn "Detected inconsistencies between controller.quorum.voters and quorum-state, removing the file..."
+    rm -f "${KAFKA_DATA_DIR}/__cluster_metadata-0/quorum-state"
+fi
+# Ensure custom initialization scripts are executed
+kafka_custom_init_scripts
diff --git a/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/libkafka.sh b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/libkafka.sh
new file mode 100644
index 000000000000..e27038cbf564
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/rootfs/opt/bitnami/scripts/libkafka.sh
@@ -0,0 +1,1172 @@
+#!/bin/bash
+# Copyright VMware, Inc.
+# SPDX-License-Identifier: APACHE-2.0
+#
+# Bitnami Kafka library
+
+# shellcheck disable=SC1090,SC1091
+
+# Load Generic Libraries
+. /opt/bitnami/scripts/libfile.sh
+. /opt/bitnami/scripts/libfs.sh
+. /opt/bitnami/scripts/liblog.sh
+. /opt/bitnami/scripts/libos.sh
+. /opt/bitnami/scripts/libvalidations.sh
+.
/opt/bitnami/scripts/libservice.sh
+
+# Functions
+
+########################
+# Set a configuration setting value to a file
+# Globals:
+#   None
+# Arguments:
+#   $1 - file
+#   $2 - key
+#   $3 - values (array)
+# Returns:
+#   None
+#########################
+kafka_common_conf_set() {
+    local file="${1:?missing file}"
+    local key="${2:?missing key}"
+    shift
+    shift
+    local values=("$@")
+
+    if [[ "${#values[@]}" -eq 0 ]]; then
+        stderr_print "missing value"
+        return 1
+    elif [[ "${#values[@]}" -ne 1 ]]; then
+        for i in "${!values[@]}"; do
+            kafka_common_conf_set "$file" "$key" "${values[$i]}"
+        done
+    else
+        value="${values[0]}"
+        # Check if the value was set before
+        if grep -q "^[#\\s]*$key\s*=.*" "$file"; then
+            # Update the existing key
+            replace_in_file "$file" "^[#\\s]*${key}\s*=.*" "${key}=${value}" false
+        else
+            # Add a new key
+            printf '\n%s=%s' "$key" "$value" >>"$file"
+        fi
+    fi
+}
+
+########################
+# Returns true if at least one listener is configured using SSL
+# Globals:
+#   KAFKA_CFG_LISTENERS
+#   KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
+# Arguments:
+#   None
+# Returns:
+#   true/false
+#########################
+kafka_has_ssl_listener(){
+    if ! is_empty_value "${KAFKA_CFG_LISTENERS:-}"; then
+        if is_empty_value "${KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP:-}"; then
+            if [[ "$KAFKA_CFG_LISTENERS" =~ SSL: || "$KAFKA_CFG_LISTENERS" =~ SASL_SSL: ]]; then
+                return
+            fi
+        else
+            read -r -a protocol_maps <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP")"
+            for protocol_map in "${protocol_maps[@]}"; do
+                read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")"
+                # Obtain the listener and protocol from the protocol map string, e.g. CONTROLLER:PLAINTEXT
+                listener="${map[0]}"
+                protocol="${map[1]}"
+                if [[ "$protocol" = "SSL" || "$protocol" = "SASL_SSL" ]]; then
+                    if [[ "$KAFKA_CFG_LISTENERS" =~ $listener ]]; then
+                        return
+                    fi
+                fi
+            done
+        fi
+    fi
+    return 1
+}
+
+########################
+# Returns true if at least one listener is configured using SASL
+# Globals:
+#   KAFKA_CFG_LISTENERS
+#   KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
+# Arguments:
+#   None
+# Returns:
+#   true/false
+#########################
+kafka_has_sasl_listener(){
+    if ! is_empty_value "${KAFKA_CFG_LISTENERS:-}"; then
+        if is_empty_value "${KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP:-}"; then
+            if [[ "$KAFKA_CFG_LISTENERS" =~ SASL_PLAINTEXT: ]] || [[ "$KAFKA_CFG_LISTENERS" =~ SASL_SSL: ]]; then
+                return
+            fi
+        else
+            read -r -a protocol_maps <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP")"
+            for protocol_map in "${protocol_maps[@]}"; do
+                read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")"
+                # Obtain the listener and protocol from the protocol map string, e.g. CONTROLLER:PLAINTEXT
+                listener="${map[0]}"
+                protocol="${map[1]}"
+                if [[ "$protocol" = "SASL_PLAINTEXT" || "$protocol" = "SASL_SSL" ]]; then
+                    if [[ "$KAFKA_CFG_LISTENERS" =~ $listener ]]; then
+                        return
+                    fi
+                fi
+            done
+        fi
+    fi
+    return 1
+}
+
+########################
+# Returns true if at least one listener is configured using plaintext
+# Globals:
+#   KAFKA_CFG_LISTENERS
+#   KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
+# Arguments:
+#   None
+# Returns:
+#   true/false
+#########################
+kafka_has_plaintext_listener(){
+    if !
is_empty_value "${KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP:-}"; then + read -r -a protocol_maps <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP")" + for protocol_map in "${protocol_maps[@]}"; do + read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")" + # Obtain the listener and protocol from protocol map string, e.g. CONTROLLER:PLAINTEXT + listener="${map[0]}" + protocol="${map[1]}" + if [[ "$protocol" = "PLAINTEXT" ]]; then + if is_empty_value "${KAFKA_CFG_LISTENERS:-}" || [[ "$KAFKA_CFG_LISTENERS" =~ $listener ]]; then + return + fi + fi + done + else + if is_empty_value "${KAFKA_CFG_LISTENERS:-}" || [[ "$KAFKA_CFG_LISTENERS" =~ PLAINTEXT: ]]; then + return + fi + fi + return 1 +} + +######################## +# Backwards compatibility measure to configure the TLS truststore locations +# Globals: +# KAFKA_CONF_FILE +# Arguments: +# None +# Returns: +# None +######################### +kafka_configure_default_truststore_locations() { + # Backwards compatibility measure to allow custom truststore locations but at the same time not disrupt + # the UX that the previous version of the containers and the helm chart have. + # Context: The chart and containers by default assumed that the truststore location was KAFKA_CERTS_DIR/kafka.truststore.jks or KAFKA_MOUNTED_CONF_DIR/certs/kafka.truststore.jks. + # Because of this, we could not use custom certificates in different locations (use case: A custom base image that already has a truststore). Changing the logic to allow custom + # locations implied major changes in the current user experience (which only required to mount certificates at the assumed location). In order to maintain this compatibility we need + # use this logic that sets the KAFKA_TLS_*_FILE variables to the previously assumed locations in case it is not set + + # Kafka truststore + if kafka_has_ssl_listener && is_empty_value "${KAFKA_TLS_TRUSTSTORE_FILE:-}"; then + local kafka_truststore_filename="kafka.truststore.jks" + [[ "$KAFKA_TLS_TYPE" = "PEM" ]] && kafka_truststore_filename="kafka.truststore.pem" + if [[ -f "${KAFKA_CERTS_DIR}/${kafka_truststore_filename}" ]]; then + # Mounted in /opt/bitnami/kafka/conf/certs + export KAFKA_TLS_TRUSTSTORE_FILE="${KAFKA_CERTS_DIR}/${kafka_truststore_filename}" + else + # Mounted in /bitnami/kafka/conf/certs + export KAFKA_TLS_TRUSTSTORE_FILE="${KAFKA_MOUNTED_CONF_DIR}/certs/${kafka_truststore_filename}" + fi + fi + # Zookeeper truststore + if [[ "${KAFKA_ZOOKEEPER_PROTOCOL:-}" =~ SSL ]] && is_empty_value "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE:-}"; then + local zk_truststore_filename="zookeeper.truststore.jks" + [[ "$KAFKA_ZOOKEEPER_TLS_TYPE" = "PEM" ]] && zk_truststore_filename="zookeeper.truststore.pem" + if [[ -f "${KAFKA_CERTS_DIR}/${zk_truststore_filename}" ]]; then + # Mounted in /opt/bitnami/kafka/conf/certs + export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${KAFKA_CERTS_DIR}/${zk_truststore_filename}" + else + # Mounted in /bitnami/kafka/conf/certs + export KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE="${KAFKA_MOUNTED_CONF_DIR}/certs/${zk_truststore_filename}" + fi + fi +} + +######################## +# Set a configuration setting value to server.properties +# Globals: +# KAFKA_CONF_FILE +# Arguments: +# $1 - key +# $2 - values (array) +# Returns: +# None +######################### +kafka_server_conf_set() { + kafka_common_conf_set "$KAFKA_CONF_FILE" "$@" +} + +######################## +# Set a configuration setting value to producer.properties and consumer.properties +# Globals: +# KAFKA_CONF_DIR +# Arguments: +# $1 - key +# $2 - 
values (array) +# Returns: +# None +######################### +kafka_producer_consumer_conf_set() { + kafka_common_conf_set "$KAFKA_CONF_DIR/producer.properties" "$@" + kafka_common_conf_set "$KAFKA_CONF_DIR/consumer.properties" "$@" +} + +######################## +# Create alias for environment variable, so both can be used +# Globals: +# None +# Arguments: +# $1 - Alias environment variable name +# $2 - Original environment variable name +# Returns: +# None +######################### +kafka_declare_alias_env() { + local -r alias="${1:?missing environment variable alias}" + local -r original="${2:?missing original environment variable}" + if printenv "${original}" >/dev/null; then + export "$alias"="${!original:-}" + fi +} + +######################## +# Map Kafka legacy environment variables to the new names +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_create_alias_environment_variables() { + suffixes=( + "ADVERTISED_LISTENERS" + "BROKER_ID" + "NODE_ID" + "CONTROLLER_QUORUM_VOTERS" + "PROCESS_ROLES" + "DEFAULT_REPLICATION_FACTOR" + "DELETE_TOPIC_ENABLE" + "INTER_BROKER_LISTENER_NAME" + "LISTENERS" + "LISTENER_SECURITY_PROTOCOL_MAP" + "LOG_DIRS" + "LOG_FLUSH_INTERVAL_MESSAGES" + "LOG_FLUSH_INTERVAL_MS" + "LOG_MESSAGE_FORMAT_VERSION" + "LOG_RETENTION_BYTES" + "LOG_RETENTION_CHECK_INTERVALS_MS" + "LOG_RETENTION_HOURS" + "LOG_SEGMENT_BYTES" + "MESSAGE_MAX_BYTES" + "NUM_IO_THREADS" + "NUM_NETWORK_THREADS" + "NUM_PARTITIONS" + "NUM_RECOVERY_THREADS_PER_DATA_DIR" + "OFFSETS_TOPIC_REPLICATION_FACTOR" + "SOCKET_RECEIVE_BUFFER_BYTES" + "SOCKET_REQUEST_MAX_BYTES" + "SOCKET_SEND_BUFFER_BYTES" + "SSL_ENDPOINT_IDENTIFICATION_ALGORITHM" + "TRANSACTION_STATE_LOG_MIN_ISR" + "TRANSACTION_STATE_LOG_REPLICATION_FACTOR" + "ZOOKEEPER_CONNECT" + "ZOOKEEPER_CONNECTION_TIMEOUT_MS" + ) + kafka_declare_alias_env "KAFKA_CFG_LOG_DIRS" "KAFKA_LOGS_DIRS" + kafka_declare_alias_env "KAFKA_CFG_LOG_SEGMENT_BYTES" "KAFKA_SEGMENT_BYTES" + kafka_declare_alias_env "KAFKA_CFG_MESSAGE_MAX_BYTES" "KAFKA_MAX_MESSAGE_BYTES" + kafka_declare_alias_env "KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS" "KAFKA_ZOOKEEPER_CONNECT_TIMEOUT_MS" + kafka_declare_alias_env "KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE" "KAFKA_AUTO_CREATE_TOPICS_ENABLE" + kafka_declare_alias_env "KAFKA_CLIENT_USERS" "KAFKA_BROKER_USER" + kafka_declare_alias_env "KAFKA_CLIENT_PASSWORDS" "KAFKA_BROKER_PASSWORD" + kafka_declare_alias_env "KAFKA_CLIENT_LISTENER_NAME" "KAFKA_CLIENT_LISTENER" + for s in "${suffixes[@]}"; do + kafka_declare_alias_env "KAFKA_CFG_${s}" "KAFKA_${s}" + done +} + +######################## +# Validate settings in KAFKA_* env vars +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_validate() { + debug "Validating settings in KAFKA_* env vars..." 
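+    # Validation is cumulative: every failed check below calls
+    # print_validation_error, which logs the error and sets error_code to 1,
+    # so all misconfigurations are reported in a single run before the
+    # function returns non-zero.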
+    local error_code=0
+
+    # Auxiliary functions
+    print_validation_error() {
+        error "$1"
+        error_code=1
+    }
+    check_multi_value() {
+        if [[ " ${2} " != *" ${!1} "* ]]; then
+            print_validation_error "The allowed values for ${1} are: ${2}"
+        fi
+    }
+    # If process.roles is configured, check its values are valid and perform additional checks for each
+    check_kraft_process_roles() {
+        read -r -a roles_list <<<"$(tr ',;' ' ' <<<"$KAFKA_CFG_PROCESS_ROLES")"
+        for role in "${roles_list[@]}"; do
+            case "$role" in
+            broker) ;;
+            controller)
+                if is_empty_value "${KAFKA_CFG_CONTROLLER_LISTENER_NAMES:-}"; then
+                    print_validation_error "Role 'controller' enabled but environment variable KAFKA_CFG_CONTROLLER_LISTENER_NAMES was not provided."
+                fi
+                if is_empty_value "${KAFKA_CFG_LISTENERS:-}" || [[ ! "$KAFKA_CFG_LISTENERS" =~ ${KAFKA_CFG_CONTROLLER_LISTENER_NAMES} ]]; then
+                    print_validation_error "Role 'controller' enabled but listener ${KAFKA_CFG_CONTROLLER_LISTENER_NAMES} not found in KAFKA_CFG_LISTENERS."
+                fi
+                ;;
+            *)
+                print_validation_error "Invalid KRaft process role '$role'. Supported roles are 'broker,controller'"
+                ;;
+            esac
+        done
+    }
+    # Check all listeners are using a unique and valid port
+    check_listener_ports(){
+        check_allowed_port() {
+            local port="${1:?missing port variable}"
+            local -a validate_port_args=()
+            ! am_i_root && validate_port_args+=("-unprivileged")
+            validate_port_args+=("$port")
+            if ! err=$(validate_port "${validate_port_args[@]}"); then
+                print_validation_error "An invalid port ${port} was specified in the environment variable KAFKA_CFG_LISTENERS: ${err}."
+            fi
+        }
+
+        read -r -a listeners <<<"$(tr ',' ' ' <<<"${KAFKA_CFG_LISTENERS:-}")"
+        local -a ports=()
+        for listener in "${listeners[@]}"; do
+            read -r -a arr <<<"$(tr ':' ' ' <<<"$listener")"
+            # Obtain the port from the listener string, e.g. PLAINTEXT://:9092
+            port="${arr[2]}"
+            check_allowed_port "$port"
+            ports+=("$port")
+        done
+        # Check each listener is using a unique port
+        local -a unique_ports=()
+        read -r -a unique_ports <<< "$(echo "${ports[@]}" | tr ' ' '\n' | sort -u | tr '\n' ' ')"
+        if [[ "${#ports[@]}" != "${#unique_ports[@]}" ]]; then
+            print_validation_error "There are listeners bound to the same port"
+        fi
+    }
+    check_listener_protocols(){
+        local -r allowed_protocols=("PLAINTEXT" "SASL_PLAINTEXT" "SASL_SSL" "SSL")
+        read -r -a protocol_maps <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP")"
+        for protocol_map in "${protocol_maps[@]}"; do
+            read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")"
+            # Obtain the listener and protocol from the protocol map string, e.g. CONTROLLER:PLAINTEXT
+            listener="${map[0]}"
+            protocol="${map[1]}"
+            # Check the protocol is in the allowed list
+            if [[ ! "${allowed_protocols[*]}" =~ $protocol ]]; then
+                print_validation_error "Authentication protocol ${protocol} is not supported!"
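+                # For example, with KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP set to
+                # "CONTROLLER:PLAINTEXT,INTERNAL:SASL_SSL" (illustrative value), each
+                # entry is split into a listener name and a protocol, and the protocol
+                # is checked against the allowed list above.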
+            fi
+            # If the inter-broker listener is configured with SASL, ensure KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL is set
+            if [[ "$listener" = "${KAFKA_CFG_INTER_BROKER_LISTENER_NAME:-INTERNAL}" ]]; then
+                if [[ "$protocol" = "SASL_PLAINTEXT" ]] || [[ "$protocol" = "SASL_SSL" ]]; then
+                    if is_empty_value "${KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL:-}"; then
+                        print_validation_error "When using SASL for inter-broker communication the mechanism should be provided using KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL"
+                    fi
+                    if is_empty_value "${KAFKA_INTER_BROKER_USER:-}" || is_empty_value "${KAFKA_INTER_BROKER_PASSWORD:-}"; then
+                        print_validation_error "In order to configure SASL authentication for Kafka inter-broker communications, you must provide the SASL credentials. Set the environment variables KAFKA_INTER_BROKER_USER and KAFKA_INTER_BROKER_PASSWORD to configure the credentials for SASL authentication between brokers."
+                    fi
+                fi
+            # If the controller listener is configured with SASL, ensure KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL is set
+            elif [[ "${KAFKA_CFG_CONTROLLER_LISTENER_NAMES:-CONTROLLER}" =~ $listener ]]; then
+                if [[ "$protocol" = "SASL_PLAINTEXT" ]] || [[ "$protocol" = "SASL_SSL" ]]; then
+                    if is_empty_value "${KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL:-}"; then
+                        print_validation_error "When using SASL for controller communication the mechanism should be provided at KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL"
+                    fi
+                    if is_empty_value "${KAFKA_CONTROLLER_USER:-}" || is_empty_value "${KAFKA_CONTROLLER_PASSWORD:-}"; then
+                        print_validation_error "In order to configure SASL authentication for Kafka control plane communications, you must provide the SASL credentials. Set the environment variables KAFKA_CONTROLLER_USER and KAFKA_CONTROLLER_PASSWORD to configure the credentials for SASL authentication between controllers."
+                    fi
+                fi
+            else
+                if [[ "$protocol" = "SASL_PLAINTEXT" ]] || [[ "$protocol" = "SASL_SSL" ]]; then
+                    if is_empty_value "${KAFKA_CLIENT_USERS:-}" || is_empty_value "${KAFKA_CLIENT_PASSWORDS:-}"; then
+                        print_validation_error "In order to configure SASL authentication for Kafka, you must provide the SASL credentials. Set the environment variables KAFKA_CLIENT_USERS and KAFKA_CLIENT_PASSWORDS to configure the credentials for SASL authentication with clients."
+                    fi
+                fi
+
+            fi
+        done
+    }
+
+    if is_empty_value "${KAFKA_CFG_PROCESS_ROLES:-}" && is_empty_value "${KAFKA_CFG_ZOOKEEPER_CONNECT:-}"; then
+        print_validation_error "Kafka has not been configured to work in either KRaft or Zookeeper mode. Please make sure at least one of the modes is configured."
+    fi
+    # Check KRaft mode
+    if ! is_empty_value "${KAFKA_CFG_PROCESS_ROLES:-}"; then
+        # KRaft
+        if [[ "$(kafka_get_version)" =~ ^3\.2\. ]]; then
+            warn "KRaft mode is not production-ready in Kafka 3.2, for production environments we recommend upgrading to Kafka 3.3 or newer"
+        fi
+        # Only allow Zookeeper configuration if migration mode is enabled
+        if ! is_empty_value "${KAFKA_CFG_ZOOKEEPER_CONNECT:-}" &&
+            { is_empty_value "${KAFKA_CFG_ZOOKEEPER_METADATA_MIGRATION_ENABLE:-}" || !
is_boolean_yes "$KAFKA_CFG_ZOOKEEPER_METADATA_MIGRATION_ENABLE"; }; then
+            print_validation_error "Both KRaft and Zookeeper modes are configured, but KAFKA_CFG_ZOOKEEPER_METADATA_MIGRATION_ENABLE is not enabled"
+        fi
+        if is_empty_value "${KAFKA_CFG_NODE_ID:-}"; then
+            print_validation_error "KRaft mode requires a unique node.id, please set the environment variable KAFKA_CFG_NODE_ID"
+        fi
+        if is_empty_value "${KAFKA_CFG_CONTROLLER_QUORUM_VOTERS:-}"; then
+            print_validation_error "KRaft mode requires KAFKA_CFG_CONTROLLER_QUORUM_VOTERS to be set"
+        fi
+        check_kraft_process_roles
+    fi
+    # Check Zookeeper mode
+    if ! is_empty_value "${KAFKA_CFG_ZOOKEEPER_CONNECT:-}"; then
+        # If the SSL/SASL_SSL protocol is configured, check certificates are provided
+        if [[ "$KAFKA_ZOOKEEPER_PROTOCOL" =~ SSL ]]; then
+            if [[ "$KAFKA_ZOOKEEPER_TLS_TYPE" = "JKS" ]]; then
+                # Fail if the truststore is not provided
+                if [[ ! -f "$KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE" ]]; then
+                    print_validation_error "In order to configure the TLS encryption for Zookeeper with JKS certs you must mount your zookeeper.truststore.jks cert to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory."
+                fi
+                # Warn if the keystore is not provided, only required if Zookeeper mTLS is enabled (ZOO_TLS_CLIENT_AUTH)
+                if [[ ! -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.jks" ]] && [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/zookeeper.keystore.jks" ]]; then
+                    warn "In order to configure the mTLS for Zookeeper with JKS certs you must mount your zookeeper.keystore.jks cert to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory."
+                fi
+            elif [[ "$KAFKA_ZOOKEEPER_TLS_TYPE" = "PEM" ]]; then
+                # Fail if the CA / validation cert is not provided
+                if [[ ! -f "$KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE" ]]; then
+                    print_validation_error "In order to configure the TLS encryption for Zookeeper with PEM certs you must mount your zookeeper.truststore.pem cert to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory."
+                fi
+                # Warn if the node key or cert are not provided, only required if Zookeeper mTLS is enabled (ZOO_TLS_CLIENT_AUTH)
+                if { [[ ! -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.pem" ]] || [[ ! -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.key" ]]; } &&
+                    { [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/zookeeper.keystore.pem" ]] || [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/zookeeper.keystore.key" ]]; }; then
+                    warn "In order to configure the mTLS for Zookeeper with PEM certs you must mount your zookeeper.keystore.pem cert and zookeeper.keystore.key key to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory."
+                fi
+            fi
+        fi
+        # If the SASL/SASL_SSL protocol is configured, check credentials are provided
+        if [[ "$KAFKA_ZOOKEEPER_PROTOCOL" =~ SASL ]]; then
+            if is_empty_value "${KAFKA_ZOOKEEPER_USER:-}" || is_empty_value "${KAFKA_ZOOKEEPER_PASSWORD:-}"; then
+                print_validation_error "In order to configure SASL authentication for Kafka, you must provide the SASL credentials. Set the environment variables KAFKA_ZOOKEEPER_USER and KAFKA_ZOOKEEPER_PASSWORD, to configure the credentials for SASL authentication with Zookeeper."
+            fi
+        fi
+        # If using the plaintext protocol, check it is explicitly allowed
+        if [[ "$KAFKA_ZOOKEEPER_PROTOCOL" = "PLAINTEXT" ]]; then
+            warn "The KAFKA_ZOOKEEPER_PROTOCOL environment variable does not configure SASL and/or SSL; this setting is not recommended for production environments."
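+            # For instance (illustrative values), setting KAFKA_ZOOKEEPER_PROTOCOL=SASL_SSL
+            # together with KAFKA_ZOOKEEPER_USER/KAFKA_ZOOKEEPER_PASSWORD and a mounted
+            # zookeeper.truststore.jks satisfies the checks above and avoids this warning.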
+ fi + fi + # Check listener ports are unique and allowed + check_listener_ports + # Check listeners are mapped to a valid security protocol + check_listener_protocols + # Warn users if plaintext listeners are configured + if kafka_has_plaintext_listener; then + warn "Kafka has been configured with a PLAINTEXT listener, this setting is not recommended for production environments." + fi + # If SSL/SASL_SSL listeners configured, check certificates are provided + if kafka_has_ssl_listener; then + if [[ "$KAFKA_TLS_TYPE" = "JKS" ]] && + { [[ ! -f "${KAFKA_CERTS_DIR}/kafka.keystore.jks" ]] || [[ ! -f "$KAFKA_TLS_TRUSTSTORE_FILE" ]]; } && + { [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/kafka.keystore.jks" ]] || [[ ! -f "$KAFKA_TLS_TRUSTSTORE_FILE" ]]; }; then + print_validation_error "In order to configure the TLS encryption for Kafka with JKS certs you must mount your kafka.keystore.jks and kafka.truststore.jks certs to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory." + elif [[ "$KAFKA_TLS_TYPE" = "PEM" ]] && + { [[ ! -f "${KAFKA_CERTS_DIR}/kafka.keystore.pem" ]] || [[ ! -f "${KAFKA_CERTS_DIR}/kafka.keystore.key" ]] || [[ ! -f "$KAFKA_TLS_TRUSTSTORE_FILE" ]]; } && + { [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/kafka.keystore.pem" ]] || [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/certs/kafka.keystore.key" ]] || [[ ! -f "$KAFKA_TLS_TRUSTSTORE_FILE" ]]; }; then + print_validation_error "In order to configure the TLS encryption for Kafka with PEM certs you must mount your kafka.keystore.pem, kafka.keystore.key and kafka.truststore.pem certs to the ${KAFKA_MOUNTED_CONF_DIR}/certs directory." + fi + fi + # If SASL/SASL_SSL listeners configured, check passwords are provided + if kafka_has_sasl_listener; then + if is_empty_value "${KAFKA_CFG_SASL_ENABLED_MECHANISMS:-}"; then + print_validation_error "Specified SASL protocol but no SASL mechanisms provided in KAFKA_CFG_SASL_ENABLED_MECHANISMS" + fi + fi + # Check users and passwords lists are the same size + read -r -a users <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_USERS:-}")" + read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS:-}")" + if [[ "${#users[@]}" -ne "${#passwords[@]}" ]]; then + print_validation_error "Specify the same number of passwords on KAFKA_CLIENT_PASSWORDS as the number of users on KAFKA_CLIENT_USERS!" 
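+        # e.g. KAFKA_CLIENT_USERS="user1,user2" requires exactly two entries in
+        # KAFKA_CLIENT_PASSWORDS, such as "pass1,pass2" (values are illustrative).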
+    fi
+    check_multi_value "KAFKA_TLS_TYPE" "JKS PEM"
+    check_multi_value "KAFKA_ZOOKEEPER_TLS_TYPE" "JKS PEM"
+    check_multi_value "KAFKA_ZOOKEEPER_PROTOCOL" "PLAINTEXT SASL SSL SASL_SSL"
+    check_multi_value "KAFKA_TLS_CLIENT_AUTH" "none requested required"
+    [[ "$error_code" -eq 0 ]] || return "$error_code"
+}
+
+########################
+# Get kafka version
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   version
+#########################
+kafka_get_version() {
+    local -a cmd=("kafka-topics.sh" "--version")
+    am_i_root && cmd=("run_as_user" "$KAFKA_DAEMON_USER" "${cmd[@]}")
+
+    read -r -a ver_split <<< "$("${cmd[@]}")"
+    echo "${ver_split[0]}"
+}
+
+#########################
+# Configure JAAS for a given listener and SASL mechanisms
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   $1 - Name of the listener JAAS will be configured for
+#   $2 - Role of the listener: 'controller', 'inter-broker', or empty for client listeners
+# Returns:
+#   None
+#########################
+kafka_configure_server_jaas() {
+    local listener="${1:?missing listener name}"
+    local role="${2:-}"
+
+    if [[ "$role" = "controller" ]]; then
+        local jaas_content=()
+        if [[ "$KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL" = "PLAIN" ]]; then
+            jaas_content=(
+                "org.apache.kafka.common.security.plain.PlainLoginModule required"
+                "username=\"${KAFKA_CONTROLLER_USER}\""
+                "password=\"${KAFKA_CONTROLLER_PASSWORD}\""
+                "user_${KAFKA_CONTROLLER_USER}=\"${KAFKA_CONTROLLER_PASSWORD}\";"
+            )
+        elif [[ "$KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL" =~ SCRAM ]]; then
+            jaas_content=(
+                "org.apache.kafka.common.security.scram.ScramLoginModule required"
+                "username=\"${KAFKA_CONTROLLER_USER}\""
+                "password=\"${KAFKA_CONTROLLER_PASSWORD}\";"
+            )
+        fi
+        listener_lower="$(echo "$listener" | tr '[:upper:]' '[:lower:]')"
+        sasl_mechanism_lower="$(echo "$KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL" | tr '[:upper:]' '[:lower:]')"
+        kafka_server_conf_set "listener.name.${listener_lower}.${sasl_mechanism_lower}.sasl.jaas.config" "${jaas_content[*]}"
+    else
+        read -r -a sasl_mechanisms_arr <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_SASL_ENABLED_MECHANISMS")"
+        read -r -a users <<<"$(tr ',;' ' ' <<<"$KAFKA_CLIENT_USERS")"
+        read -r -a passwords <<<"$(tr ',;' ' ' <<<"$KAFKA_CLIENT_PASSWORDS")"
+        # Configure JAAS for each SASL mechanism
+        # ref: https://docs.confluent.io/platform/current/kafka/authentication_sasl/index.html
+        for sasl_mechanism in "${sasl_mechanisms_arr[@]}"; do
+            local jaas_content=()
+            # For the PLAIN mechanism, only the first username will be used
+            if [[ "$sasl_mechanism" = "PLAIN" ]]; then
+                jaas_content=("org.apache.kafka.common.security.plain.PlainLoginModule required")
+                if [[ "$role" = "inter-broker" ]]; then
+                    jaas_content+=(
+                        "username=\"${KAFKA_INTER_BROKER_USER}\""
+                        "password=\"${KAFKA_INTER_BROKER_PASSWORD}\""
+                    )
+                    users+=("$KAFKA_INTER_BROKER_USER")
+                    passwords+=("$KAFKA_INTER_BROKER_PASSWORD")
+                fi
+                for ((i = 0; i < ${#users[@]}; i++)); do
+                    jaas_content+=("user_${users[i]}=\"${passwords[i]}\"")
+                done
+                # Add a semi-colon to the last element of the array
+                jaas_content[${#jaas_content[@]} - 1]="${jaas_content[${#jaas_content[@]} - 1]};"
+            elif [[ "$sasl_mechanism" =~ SCRAM ]]; then
+                if [[ "$role" = "inter-broker" ]]; then
+                    jaas_content=(
+                        "org.apache.kafka.common.security.scram.ScramLoginModule required"
+                        "username=\"${KAFKA_INTER_BROKER_USER}\""
+                        "password=\"${KAFKA_INTER_BROKER_PASSWORD}\";"
+                    )
+                else
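+                    # For client listeners using SCRAM no credentials are inlined here:
+                    # SCRAM users are registered out of band (see
+                    # kafka_zookeeper_create_sasl_scram_users), so the bare login module
+                    # declaration below is enough.
+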
jaas_content=("org.apache.kafka.common.security.scram.ScramLoginModule required;") + fi + fi + listener_lower="$(echo "$listener" | tr '[:upper:]' '[:lower:]')" + sasl_mechanism_lower="$(echo "$sasl_mechanism" | tr '[:upper:]' '[:lower:]')" + kafka_server_conf_set "listener.name.${listener_lower}.${sasl_mechanism_lower}.sasl.jaas.config" "${jaas_content[*]}" + done + fi +} + +######################## +# Configure Zookeeper JAAS authentication +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_zookeeper_configure_jaas(){ + local jaas_content=( + "org.apache.kafka.common.security.plain.PlainLoginModule required" + "username=\"${KAFKA_ZOOKEEPER_USER}\"" + "password=\"${KAFKA_ZOOKEEPER_PASSWORD}\";" + ) + + kafka_server_conf_set "sasl.jaas.config" "${jaas_content[*]}" +} + +######################## +# Generate JAAS authentication file for local producer/consumer to use +# Globals: +# KAFKA_* +# Arguments: +# $1 - Authentication protocol to use for the internal listener +# $2 - Authentication protocol to use for the client listener +# Returns: +# None +######################### +kafka_configure_consumer_producer_jaas(){ + local jaas_content=() + read -r -a users <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_USERS}")" + read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS}")" + + if [[ "${KAFKA_CFG_SASL_ENABLED_MECHANISMS}" =~ SCRAM ]]; then + jaas_content=("org.apache.kafka.common.security.scram.ScramLoginModule required") + elif [[ "${KAFKA_CFG_SASL_ENABLED_MECHANISMS}" =~ PLAIN ]]; then + jaas_content=("org.apache.kafka.common.security.plain.PlainLoginModule required") + else + error "Couldn't configure a supported SASL mechanism for Kafka consumer/producer properties" + exit 1 + fi + + jaas_content+=( + "username=\"${users[0]}\"" + "password=\"${passwords[0]}\";" + ) + + kafka_producer_consumer_conf_set "sasl.jaas.config" "${jaas_content[*]}" +} + +######################## +# Create users in zookeper when using SASL/SCRAM mechanism +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_zookeeper_create_sasl_scram_users() { + info "Creating users in Zookeeper" + read -r -a users <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_USERS}")" + read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS}")" + local zookeeper_connect + zookeeper_connect=$(grep "^zookeeper.connect=" "$KAFKA_CONF_FILE" | sed -E 's/^zookeeper\.connect=(\S+)$/\1/') + read -r -a zookeeper_hosts <<<"$(tr ',;' ' ' <<<"${zookeeper_connect}")" + + if [[ "${#zookeeper_hosts[@]}" -eq 0 ]]; then + error "Couldn't obtain zookeeper.connect from $KAFKA_CONF_FILE" + exit 1 + fi + # Wait for Zookeeper to be reachable + read -r -a aux <<<"$(tr ':' ' ' <<<"${zookeeper_hosts[0]}")" + local host="${aux[0]:?missing host}" + local port="${aux[1]:-2181}" + wait-for-port --host "$host" "$port" + + # Add interbroker credentials + if grep -Eq "^sasl.mechanism.inter.broker.protocol=SCRAM" "$KAFKA_CONF_FILE"; then + users+=("${KAFKA_INTER_BROKER_USER}") + passwords+=("${KAFKA_INTER_BROKER_PASSWORD}") + fi + for ((i = 0; i < ${#users[@]}; i++)); do + debug "Creating user ${users[i]} in zookeeper" + # Ref: https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_scram.html#sasl-scram-overview + debug_execute kafka-configs.sh --zookeeper "$zookeeper_connect" --alter --add-config "SCRAM-SHA-256=[iterations=8192,password=${passwords[i]}],SCRAM-SHA-512=[password=${passwords[i]}]" --entity-type users --entity-name "${users[i]}" + done 
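+    # At this point every user in KAFKA_CLIENT_USERS (plus the inter-broker user
+    # when SCRAM is used between brokers) has SCRAM-SHA-256 and SCRAM-SHA-512
+    # credentials registered in Zookeeper through kafka-configs.sh.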
+    for ((i = 0; i < ${#users[@]}; i++)); do
+        debug "Creating user ${users[i]} in Zookeeper"
+        # Ref: https://docs.confluent.io/current/kafka/authentication_sasl/authentication_sasl_scram.html#sasl-scram-overview
+        debug_execute kafka-configs.sh --zookeeper "$zookeeper_connect" --alter --add-config "SCRAM-SHA-256=[iterations=8192,password=${passwords[i]}],SCRAM-SHA-512=[password=${passwords[i]}]" --entity-type users --entity-name "${users[i]}"
+    done
+}
+
+########################
+# Configure Kafka SSL settings
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_configure_ssl() {
+    # Configures both the Kafka server and the producer/consumer properties
+    configure_both() {
+        kafka_server_conf_set "${1:?missing key}" "${2:?missing value}"
+        kafka_producer_consumer_conf_set "${1:?missing key}" "${2:?missing value}"
+    }
+    kafka_server_conf_set "ssl.client.auth" "${KAFKA_TLS_CLIENT_AUTH}"
+    configure_both ssl.keystore.type "${KAFKA_TLS_TYPE}"
+    configure_both ssl.truststore.type "${KAFKA_TLS_TYPE}"
+    local -r kafka_truststore_location="${KAFKA_CERTS_DIR}/$(basename "${KAFKA_TLS_TRUSTSTORE_FILE}")"
+    ! is_empty_value "${KAFKA_CERTIFICATE_PASSWORD:-}" && configure_both ssl.key.password "$KAFKA_CERTIFICATE_PASSWORD"
+    if [[ "$KAFKA_TLS_TYPE" = "PEM" ]]; then
+        file_to_multiline_property() {
+            awk 'NR > 1{print line"\\n\\"}{line=$0;}END{print $0" "}' <"${1:?missing file}"
+        }
+        remove_previous_cert_value() {
+            local key="${1:?missing key}"
+            files=(
+                "${KAFKA_CONF_FILE}"
+                "${KAFKA_CONF_DIR}/producer.properties"
+                "${KAFKA_CONF_DIR}/consumer.properties"
+            )
+            for file in "${files[@]}"; do
+                if grep -q "^[#\\s]*$key\s*=.*" "$file"; then
+                    # Delete all lines from the beginning of the certificate to its end
+                    sed -i "/^[#\\s]*$key\s*=.*-----BEGIN/,/-----END/d" "$file"
+                fi
+            done
+        }
+        # Any previous certificate value needs to be removed first:
+        # kafka_common_conf_set uses replace_in_file, which can't match multiple lines
+        remove_previous_cert_value ssl.keystore.key
+        remove_previous_cert_value ssl.keystore.certificate.chain
+        remove_previous_cert_value ssl.truststore.certificates
+        configure_both ssl.keystore.key "$(file_to_multiline_property "${KAFKA_CERTS_DIR}/kafka.keystore.key")"
+        configure_both ssl.keystore.certificate.chain "$(file_to_multiline_property "${KAFKA_CERTS_DIR}/kafka.keystore.pem")"
+        configure_both ssl.truststore.certificates "$(file_to_multiline_property "${kafka_truststore_location}")"
+    elif [[ "$KAFKA_TLS_TYPE" = "JKS" ]]; then
+        configure_both ssl.keystore.location "$KAFKA_CERTS_DIR"/kafka.keystore.jks
+        configure_both ssl.truststore.location "$kafka_truststore_location"
+        ! is_empty_value "${KAFKA_CERTIFICATE_PASSWORD:-}" && configure_both ssl.keystore.password "$KAFKA_CERTIFICATE_PASSWORD"
+        ! is_empty_value "${KAFKA_CERTIFICATE_PASSWORD:-}" && configure_both ssl.truststore.password "$KAFKA_CERTIFICATE_PASSWORD"
+    fi
+    true # Avoid the function failing due to the checks above
+}
+
+########################
+# Configure Kafka Zookeeper TLS settings
+# Globals:
+#   KAFKA_ZOOKEEPER_TLS_*
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_zookeeper_configure_tls() {
+    # Note that Zookeeper does not support a key password different from the keystore password,
+    # so be sure to set the key password in the keystore to be identical to the keystore password;
+    # otherwise the connection attempt to Zookeeper will fail
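+    # The properties below follow KIP-515: TLS connections to Zookeeper require the Netty
+    # client connection socket, hence zookeeper.clientCnxnSocket is set explicitly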
+    local keystore_location=""
+    local -r kafka_zk_truststore_location="${KAFKA_CERTS_DIR}/$(basename "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE}")"
+
+    if [[ "$KAFKA_ZOOKEEPER_TLS_TYPE" = "JKS" ]] && [[ -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.jks" ]]; then
+        keystore_location="${KAFKA_CERTS_DIR}/zookeeper.keystore.jks"
+    elif [[ "$KAFKA_ZOOKEEPER_TLS_TYPE" = "PEM" ]] && [[ -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.pem" ]] && [[ -f "${KAFKA_CERTS_DIR}/zookeeper.keystore.key" ]]; then
+        # Concatenate the private key into the public certificate file
+        # This is needed to load the keystore from a location using PEM
+        keystore_location="${KAFKA_CERTS_DIR}/zookeeper.keypair.pem"
+        cat "${KAFKA_CERTS_DIR}/zookeeper.keystore.pem" "${KAFKA_CERTS_DIR}/zookeeper.keystore.key" > "$keystore_location"
+    fi
+
+    kafka_server_conf_set "zookeeper.clientCnxnSocket" "org.apache.zookeeper.ClientCnxnSocketNetty"
+    kafka_server_conf_set "zookeeper.ssl.client.enable" "true"
+    is_boolean_yes "${KAFKA_ZOOKEEPER_TLS_VERIFY_HOSTNAME:-}" && kafka_server_conf_set "zookeeper.ssl.endpoint.identification.algorithm" "HTTPS"
+    ! is_empty_value "${keystore_location:-}" && kafka_server_conf_set "zookeeper.ssl.keystore.location" "${keystore_location}"
+    ! is_empty_value "${KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD:-}" && kafka_server_conf_set "zookeeper.ssl.keystore.password" "${KAFKA_ZOOKEEPER_TLS_KEYSTORE_PASSWORD}"
+    ! is_empty_value "${kafka_zk_truststore_location:-}" && kafka_server_conf_set "zookeeper.ssl.truststore.location" "${kafka_zk_truststore_location}"
+    ! is_empty_value "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD:-}" && kafka_server_conf_set "zookeeper.ssl.truststore.password" "${KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_PASSWORD}"
+    true # Avoid the function failing due to the checks above
+}
+
+########################
+# Configure Kafka configuration files from environment variables
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_configure_from_environment_variables() {
+    # List of special cases to apply to the variables
+    local -r exception_regexps=(
+        "s/sasl\.ssl/sasl_ssl/g"
+        "s/sasl\.plaintext/sasl_plaintext/g"
+    )
+    # Map environment variables to config properties
+    for var in "${!KAFKA_CFG_@}"; do
+        key="$(echo "$var" | sed -e 's/^KAFKA_CFG_//g' -e 's/_/\./g' | tr '[:upper:]' '[:lower:]')"
+
+        # Exception for the camel case in this environment variable
+        [[ "$var" == "KAFKA_CFG_ZOOKEEPER_CLIENTCNXNSOCKET" ]] && key="zookeeper.clientCnxnSocket"
+
+        # Apply exception regexps
+        for regex in "${exception_regexps[@]}"; do
+            key="$(echo "$key" | sed "$regex")"
+        done
+
+        value="${!var}"
+        kafka_server_conf_set "$key" "$value"
+    done
+}
+
+########################
+# Initialize KRaft storage
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_kraft_storage_initialize() {
+    local args=("--config" "$KAFKA_CONF_FILE" "--ignore-formatted")
+    info "Initializing KRaft storage metadata"
+
+    # If cluster.id is found in meta.properties, use it
+    if [[ -f "${KAFKA_DATA_DIR}/meta.properties" ]]; then
+        KAFKA_KRAFT_CLUSTER_ID=$(grep "^cluster.id=" "${KAFKA_DATA_DIR}/meta.properties" | sed -E 's/^cluster\.id=(\S+)$/\1/')
+    fi
+
+    if is_empty_value "${KAFKA_KRAFT_CLUSTER_ID:-}"; then
+        warn "KAFKA_KRAFT_CLUSTER_ID not set - if using multiple nodes, you must use the same cluster ID for each one"
+        KAFKA_KRAFT_CLUSTER_ID="$("${KAFKA_HOME}/bin/kafka-storage.sh" random-uuid)"
+        info "Generated Kafka cluster ID '${KAFKA_KRAFT_CLUSTER_ID}'"
+    fi
+
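+    # The resulting ID is a URL-safe base64-encoded UUID, e.g. "qYoeWrjrSqWWkWm3RyFvsw" (illustrative value)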
args+=("--cluster-id" "$KAFKA_KRAFT_CLUSTER_ID") + + # SCRAM users are configured during the cluster bootstrapping process and can later be manually updated using kafka-config.sh + if is_boolean_yes "${KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS:-}"; then + info "Adding KRaft SCRAM users at storage bootstrap" + read -r -a users <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_USERS}")" + read -r -a passwords <<<"$(tr ',;' ' ' <<<"${KAFKA_CLIENT_PASSWORDS}")" + # Configure SCRAM-SHA-256 if enabled + if grep -Eq "^sasl.enabled.mechanisms=.*SCRAM-SHA-256" "$KAFKA_CONF_FILE"; then + for ((i = 0; i < ${#users[@]}; i++)); do + args+=("--add-scram" "SCRAM-SHA-256=[name=${users[i]},password=${passwords[i]}]") + done + fi + # Configure SCRAM-SHA-512 if enabled + if grep -Eq "^sasl.enabled.mechanisms=.*SCRAM-SHA-512" "$KAFKA_CONF_FILE"; then + for ((i = 0; i < ${#users[@]}; i++)); do + args+=("--add-scram" "SCRAM-SHA-512=[name=${users[i]},password=${passwords[i]}]") + done + fi + # Add interbroker credentials + if grep -Eq "^sasl.mechanism.inter.broker.protocol=SCRAM-SHA-256" "$KAFKA_CONF_FILE"; then + args+=("--add-scram" "SCRAM-SHA-256=[name=${KAFKA_INTER_BROKER_USER},password=${KAFKA_INTER_BROKER_PASSWORD}]") + elif grep -Eq "^sasl.mechanism.inter.broker.protocol=SCRAM-SHA-512" "$KAFKA_CONF_FILE"; then + args+=("--add-scram" "SCRAM-SHA-512=[name=${KAFKA_INTER_BROKER_USER},password=${KAFKA_INTER_BROKER_PASSWORD}]") + fi + # Add controller credentials + if grep -Eq "^sasl.mechanism.controller.protocol=SCRAM-SHA-256" "$KAFKA_CONF_FILE"; then + args+=("--add-scram" "SCRAM-SHA-256=[name=${KAFKA_CONTROLLER_USER},password=${KAFKA_CONTROLLER_PASSWORD}]") + elif grep -Eq "^sasl.mechanism.controller.protocol=SCRAM-SHA-512" "$KAFKA_CONF_FILE"; then + args+=("--add-scram" "SCRAM-SHA-512=[name=${KAFKA_CONTROLLER_USER},password=${KAFKA_CONTROLLER_PASSWORD}]") + fi + fi + info "Formatting storage directories to add metadata..." + "${KAFKA_HOME}/bin/kafka-storage.sh" format "${args[@]}" +} + +######################## +# Detects inconsitences between the configuration at KAFKA_CONF_FILE and cluster-state file +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_kraft_quorum_voters_changed(){ + read -r -a quorum_voters_conf_ids <<<"$(grep "^controller.quorum.voters=" "$KAFKA_CONF_FILE" | sed "s/^controller.quorum.voters=//" | tr "," " " | sed -E "s/\@\S+//g")" + read -r -a quorum_voters_state_ids <<< "$(grep -Eo "\{\"voterId\":[0-9]+\}" "${KAFKA_DATA_DIR}/__cluster_metadata-0/quorum-state" | grep -Eo "[0-9]+" | tr "\n" " ")" + + if [[ "${#quorum_voters_conf_ids[@]}" != "${#quorum_voters_state_ids[@]}" ]]; then + true + else + read -r -a sorted_state <<< "$(echo "${quorum_voters_conf_ids[@]}" | tr ' ' '\n' | sort | tr '\n' ' ')" + read -r -a sorted_conf <<< "$(echo "${quorum_voters_state_ids[@]}" | tr ' ' '\n' | sort | tr '\n' ' ')" + if [[ "${sorted_state[*]}" = "${sorted_conf[*]}" ]]; then + false + else + true + fi + fi +} + +######################## +# Initialize Kafka +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_initialize() { + info "Initializing Kafka..." + # Check for mounted configuration files + if ! 
is_dir_empty "$KAFKA_MOUNTED_CONF_DIR"; then + cp -Lr "$KAFKA_MOUNTED_CONF_DIR"/* "$KAFKA_CONF_DIR" + fi + # Copy truststore to cert directory + for cert_var in KAFKA_TLS_TRUSTSTORE_FILE KAFKA_ZOOKEEPER_TLS_TRUSTSTORE_FILE; do + # Only copy if the file exists and it is in a different location than KAFKA_CERTS_DIR (to avoid copying to the same location) + if [[ -f "${!cert_var}" ]] && ! [[ "${!cert_var}" =~ $KAFKA_CERTS_DIR ]]; then + info "Copying truststore ${!cert_var} to ${KAFKA_CERTS_DIR}" + cp -L "${!cert_var}" "$KAFKA_CERTS_DIR" + fi + done + + if [[ ! -f "${KAFKA_MOUNTED_CONF_DIR}/server.properties" ]]; then + info "No injected configuration files found, creating default config files" + # Restore original server.properties but remove Zookeeper/KRaft specific settings for compatibility with both architectures + cp "${KAFKA_CONF_DIR}/server.properties.original" "$KAFKA_CONF_FILE" + kafka_server_unify_conf + # Configure Kafka settings + kafka_server_conf_set log.dirs "$KAFKA_DATA_DIR" + kafka_configure_from_environment_variables + # Configure Kafka producer/consumer to set up message sizes + ! is_empty_value "${KAFKA_CFG_MAX_REQUEST_SIZE:-}" && kafka_common_conf_set "$KAFKA_CONF_DIR/producer.properties" max.request.size "$KAFKA_CFG_MAX_REQUEST_SIZE" + ! is_empty_value "${KAFKA_CFG_MAX_PARTITION_FETCH_BYTES:-}" && kafka_common_conf_set "$KAFKA_CONF_DIR/consumer.properties" max.partition.fetch.bytes "$KAFKA_CFG_MAX_PARTITION_FETCH_BYTES" + # Zookeeper mode additional settings + if ! is_empty_value "${KAFKA_CFG_ZOOKEEPER_CONNECT:-}"; then + if [[ "$KAFKA_ZOOKEEPER_PROTOCOL" =~ SSL ]]; then + kafka_zookeeper_configure_tls + fi + if [[ "$KAFKA_ZOOKEEPER_PROTOCOL" =~ SASL ]]; then + kafka_zookeeper_configure_jaas + fi + fi + # If at least one listener uses SSL or SASL_SSL, ensure SSL is configured + if kafka_has_ssl_listener; then + kafka_configure_ssl + fi + # If at least one listener uses SASL_PLAINTEXT or SASL_SSL, ensure SASL is configured + if kafka_has_sasl_listener; then + if [[ "$KAFKA_CFG_SASL_ENABLED_MECHANISMS" =~ SCRAM ]]; then + if ! is_empty_value "${KAFKA_CFG_PROCESS_ROLES:-}"; then + if [[ "$(kafka_get_version)" =~ ^3\.2\.|^3\.3\.|^3\.4\. ]]; then + # NOTE: This will depend on Kafka version when support for SCRAM is added + warn "KRaft mode requires Kafka version 3.5 or higher for SCRAM to be supported. SCRAM SASL mechanisms will now be disabled." + KAFKA_CFG_SASL_ENABLED_MECHANISMS=PLAIN + else + export KAFKA_KRAFT_BOOTSTRAP_SCRAM_USERS="true" + fi + fi + if ! is_empty_value "${KAFKA_CFG_ZOOKEEPER_CONNECT:-}"; then + export KAFKA_ZOOKEEPER_BOOTSTRAP_SCRAM_USERS="true" + fi + fi + kafka_server_conf_set sasl.enabled.mechanisms "$KAFKA_CFG_SASL_ENABLED_MECHANISMS" + fi + # Settings for each Kafka Listener are configured individually + read -r -a protocol_maps <<<"$(tr ',' ' ' <<<"$KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP")" + for protocol_map in "${protocol_maps[@]}"; do + read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")" + # Obtain the listener and protocol from protocol map string, e.g. 
+        for protocol_map in "${protocol_maps[@]}"; do
+            read -r -a map <<<"$(tr ':' ' ' <<<"$protocol_map")"
+            # Obtain the listener and protocol from the protocol map string, e.g. CONTROLLER:PLAINTEXT
+            listener="${map[0]}"
+            protocol="${map[1]}"
+            listener_lower="$(echo "$listener" | tr '[:upper:]' '[:lower:]')"
+
+            if [[ "$protocol" = "SSL" || "$protocol" = "SASL_SSL" ]]; then
+                listener_upper="$(echo "$listener" | tr '[:lower:]' '[:upper:]')"
+                env_name="KAFKA_TLS_${listener_upper}_CLIENT_AUTH"
+                [[ -n "${!env_name:-}" ]] && kafka_server_conf_set "listener.name.${listener_lower}.ssl.client.auth" "${!env_name}"
+            fi
+            if [[ "$protocol" = "SASL_PLAINTEXT" || "$protocol" = "SASL_SSL" ]]; then
+                local role=""
+                if [[ "$listener" = "${KAFKA_CFG_INTER_BROKER_LISTENER_NAME:-INTERNAL}" ]]; then
+                    kafka_server_conf_set sasl.mechanism.inter.broker.protocol "$KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL"
+                    role="inter-broker"
+                elif [[ "${KAFKA_CFG_CONTROLLER_LISTENER_NAMES:-CONTROLLER}" =~ $listener ]]; then
+                    kafka_server_conf_set sasl.mechanism.controller.protocol "$KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL"
+                    kafka_server_conf_set "listener.name.${listener_lower}.sasl.enabled.mechanisms" "$KAFKA_CFG_SASL_MECHANISM_CONTROLLER_PROTOCOL"
+                    role="controller"
+                fi
+                # If KAFKA_CLIENT_LISTENER_NAME is found in the listeners list, configure the producer/consumer accordingly
+                if [[ "$listener" = "${KAFKA_CLIENT_LISTENER_NAME:-CLIENT}" ]]; then
+                    kafka_configure_consumer_producer_jaas
+                    kafka_producer_consumer_conf_set security.protocol "$protocol"
+                    kafka_producer_consumer_conf_set sasl.mechanism "${KAFKA_CLIENT_SASL_MECHANISM:-$(kafka_client_sasl_mechanism)}"
+                fi
+                kafka_configure_server_jaas "$listener_lower" "${role:-}"
+            fi
+        done
+    else
+        info "Detected mounted server.properties file at ${KAFKA_MOUNTED_CONF_DIR}/server.properties. Skipping configuration based on env variables"
+    fi
+    true
+}
+
+########################
+# Returns the most secure SASL mechanism available for Kafka clients
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   String
+#########################
+kafka_client_sasl_mechanism() {
+    local sasl_mechanism=""
+
+    if [[ "$KAFKA_CFG_SASL_ENABLED_MECHANISMS" =~ SCRAM-SHA-512 ]]; then
+        sasl_mechanism="SCRAM-SHA-512"
+    elif [[ "$KAFKA_CFG_SASL_ENABLED_MECHANISMS" =~ SCRAM-SHA-256 ]]; then
+        sasl_mechanism="SCRAM-SHA-256"
+    elif [[ "$KAFKA_CFG_SASL_ENABLED_MECHANISMS" =~ PLAIN ]]; then
+        sasl_mechanism="PLAIN"
+    fi
+    echo "$sasl_mechanism"
+}
+
+########################
+# Removes default settings referencing Zookeeper mode or KRaft mode
+# Globals:
+#   KAFKA_*
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_server_unify_conf() {
+    local -r remove_regexps=(
+        # Zookeeper and KRaft mode settings
+        "s/^zookeeper\./#zookeeper./g"
+        "s/^group\.initial/#group.initial/g"
+        "s/^broker\./#broker./g"
+        "s/^node\./#node./g"
+        "s/^process\./#process./g"
+        "s/^listeners=/#listeners=/g"
+        "s/^listener\./#listener./g"
+        "s/^controller\./#controller./g"
+        "s/^inter\.broker/#inter.broker/g"
+        "s/^advertised\.listeners/#advertised.listeners/g"
+    )
+
+    # Comment out the settings handled via environment variables
+    for regex in "${remove_regexps[@]}"; do
+        sed -i "${regex}" "$KAFKA_CONF_FILE"
+    done
+}
+
+########################
+# Dynamically set node.id/broker.id/controller.quorum.voters if their alternative _COMMAND environment variable is set
+# Globals:
+#   KAFKA_*_COMMAND
+# Arguments:
+#   None
+# Returns:
+#   None
+#########################
+kafka_dynamic_environment_variables() {
+    # KRaft mode
+    if ! is_empty_value "${KAFKA_NODE_ID_COMMAND:-}"; then
+        KAFKA_CFG_NODE_ID="$(eval "${KAFKA_NODE_ID_COMMAND}")"
+        export KAFKA_CFG_NODE_ID
+    fi
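+    # e.g. KAFKA_NODE_ID_COMMAND="hostname | awk -F'-' '{print $NF}'" (illustrative value) would
+    # derive the node ID from an ordinal hostname such as kafka-2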
is_empty_value "${KAFKA_CONTROLLER_QUORUM_VOTERS_COMMAND:-}"; then + KAFKA_CFG_CONTROLLER_QUORUM_VOTERS="$(eval "${KAFKA_CONTROLLER_QUORUM_VOTERS_COMMAND}")" + export KAFKA_CFG_CONTROLLER_QUORUM_VOTERS + fi + # Zookeeper mode + # DEPRECATED - BROKER_ID_COMMAND has been deprecated, please use KAFKA_BROKER_ID_COMMAND instead + if ! is_empty_value "${KAFKA_BROKER_ID_COMMAND:-}"; then + KAFKA_CFG_BROKER_ID="$(eval "${KAFKA_BROKER_ID_COMMAND}")" + export KAFKA_CFG_BROKER_ID + elif ! is_empty_value "${BROKER_ID_COMMAND:-}"; then + KAFKA_CFG_BROKER_ID="$(eval "${BROKER_ID_COMMAND}")" + export KAFKA_CFG_BROKER_ID + fi +} + +######################## +# Run custom initialization scripts +# Globals: +# KAFKA_* +# Arguments: +# None +# Returns: +# None +######################### +kafka_custom_init_scripts() { + if [[ -n $(find "${KAFKA_INITSCRIPTS_DIR}/" -type f -regex ".*\.\(sh\)") ]] && [[ ! -f "${KAFKA_VOLUME_DIR}/.user_scripts_initialized" ]]; then + info "Loading user's custom files from $KAFKA_INITSCRIPTS_DIR" + for f in /docker-entrypoint-initdb.d/*; do + debug "Executing $f" + case "$f" in + *.sh) + if [[ -x "$f" ]]; then + if ! "$f"; then + error "Failed executing $f" + return 1 + fi + else + warn "Sourcing $f as it is not executable by the current user, any error may cause initialization to fail" + . "$f" + fi + ;; + *) + warn "Skipping $f, supported formats are: .sh" + ;; + esac + done + touch "$KAFKA_VOLUME_DIR"/.user_scripts_initialized + fi +} + +######################## +# Check if Kafka is running +# Globals: +# KAFKA_PID_FILE +# Arguments: +# None +# Returns: +# Whether Kafka is running +######################## +is_kafka_running() { + local pid + pid="$(get_pid_from_file "$KAFKA_PID_FILE")" + if [[ -n "$pid" ]]; then + is_service_running "$pid" + else + false + fi +} + +######################## +# Check if Kafka is running +# Globals: +# KAFKA_PID_FILE +# Arguments: +# None +# Returns: +# Whether Kafka is not running +######################## +is_kafka_not_running() { + ! is_kafka_running +} + +######################## +# Stop Kafka +# Globals: +# KAFKA_PID_FILE +# Arguments: +# None +# Returns: +# None +######################### +kafka_stop() { + ! 
+    ! is_kafka_running && return
+    stop_service_using_pid "$KAFKA_PID_FILE" TERM
+}
diff --git a/bitnami/kafka/3.6/debian-11/tags-info.yaml b/bitnami/kafka/3.6/debian-11/tags-info.yaml
new file mode 100644
index 000000000000..986d2339022f
--- /dev/null
+++ b/bitnami/kafka/3.6/debian-11/tags-info.yaml
@@ -0,0 +1,5 @@
+rolling-tags:
+- "3.6"
+- 3.6-debian-11
+- 3.6.0
+- latest
diff --git a/bitnami/kafka/docker-compose-cluster.yml b/bitnami/kafka/docker-compose-cluster.yml
index 0e7471f9242c..6f2413ca8578 100644
--- a/bitnami/kafka/docker-compose-cluster.yml
+++ b/bitnami/kafka/docker-compose-cluster.yml
@@ -5,7 +5,7 @@ version: "2"

 services:
   kafka-0:
-    image: docker.io/bitnami/kafka:3.5
+    image: docker.io/bitnami/kafka:3.6
    ports:
      - "9092"
    environment:
@@ -23,7 +23,7 @@ services:
     volumes:
       - kafka_0_data:/bitnami/kafka
   kafka-1:
-    image: docker.io/bitnami/kafka:3.5
+    image: docker.io/bitnami/kafka:3.6
    ports:
      - "9092"
    environment:
@@ -41,7 +41,7 @@ services:
     volumes:
       - kafka_1_data:/bitnami/kafka
   kafka-2:
-    image: docker.io/bitnami/kafka:3.5
+    image: docker.io/bitnami/kafka:3.6
    ports:
      - "9092"
    environment:
diff --git a/bitnami/kafka/docker-compose.yml b/bitnami/kafka/docker-compose.yml
index 86a24cf65d53..88466f32b842 100644
--- a/bitnami/kafka/docker-compose.yml
+++ b/bitnami/kafka/docker-compose.yml
@@ -5,7 +5,7 @@ version: "2"

 services:
   kafka:
-    image: docker.io/bitnami/kafka:3.5
+    image: docker.io/bitnami/kafka:3.6
    ports:
      - "9092:9092"
    volumes: