diff --git a/bitnami/elasticsearch/7/debian-12/Dockerfile b/bitnami/elasticsearch/7/debian-12/Dockerfile new file mode 100644 index 000000000000..7f3df6d2fc20 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/Dockerfile @@ -0,0 +1,63 @@ +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +FROM docker.io/bitnami/minideb:bookworm + +ARG ELASTICSEARCH_PLUGINS +ARG JAVA_EXTRA_SECURITY_DIR="/bitnami/java/extra-security" +ARG TARGETARCH + +LABEL com.vmware.cp.artifact.flavor="sha256:c50c90cfd9d12b445b011e6ad529f1ad3daea45c26d20b00732fae3cd71f6a83" \ + org.opencontainers.image.base.name="docker.io/bitnami/minideb:bookworm" \ + org.opencontainers.image.created="2024-02-20T09:25:41Z" \ + org.opencontainers.image.description="Application packaged by VMware, Inc" \ + org.opencontainers.image.licenses="Apache-2.0" \ + org.opencontainers.image.ref.name="7.17.18-debian-12-r3" \ + org.opencontainers.image.title="elasticsearch" \ + org.opencontainers.image.vendor="VMware, Inc." \ + org.opencontainers.image.version="7.17.18" + +ENV HOME="/" \ + OS_ARCH="${TARGETARCH:-amd64}" \ + OS_FLAVOUR="debian-12" \ + OS_NAME="linux" \ + PATH="/opt/bitnami/common/bin:/opt/bitnami/java/bin:/opt/bitnami/elasticsearch/bin:$PATH" + +COPY prebuildfs / +SHELL ["/bin/bash", "-o", "errexit", "-o", "nounset", "-o", "pipefail", "-c"] +# Install required system packages and dependencies +RUN install_packages ca-certificates curl libasound2-dev libc6 libfreetype6 libfreetype6-dev libgcc1 procps zlib1g +RUN mkdir -p /tmp/bitnami/pkg/cache/ ; cd /tmp/bitnami/pkg/cache/ ; \ + COMPONENTS=( \ + "yq-4.41.1-0-linux-${OS_ARCH}-debian-12" \ + "java-17.0.10-13-2-linux-${OS_ARCH}-debian-12" \ + "elasticsearch-7.17.18-1-linux-${OS_ARCH}-debian-12" \ + ) ; \ + for COMPONENT in "${COMPONENTS[@]}"; do \ + if [ ! 
-f "${COMPONENT}.tar.gz" ]; then \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz" -O ; \ + curl -SsLf "https://downloads.bitnami.com/files/stacksmith/${COMPONENT}.tar.gz.sha256" -O ; \ + fi ; \ + sha256sum -c "${COMPONENT}.tar.gz.sha256" ; \ + tar -zxf "${COMPONENT}.tar.gz" -C /opt/bitnami --strip-components=2 --no-same-owner --wildcards '*/files' ; \ + rm -rf "${COMPONENT}".tar.gz{,.sha256} ; \ + done +RUN apt-get update && apt-get upgrade -y && \ + apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives +RUN chmod g+rwX /opt/bitnami +RUN find / -perm /6000 -type f -exec chmod a-s {} \; || true + +COPY rootfs / +RUN /opt/bitnami/scripts/elasticsearch/postunpack.sh +RUN /opt/bitnami/scripts/java/postunpack.sh +ENV APP_VERSION="7.17.18" \ + BITNAMI_APP_NAME="elasticsearch" \ + ES_JAVA_HOME="/opt/bitnami/java" \ + JAVA_HOME="/opt/bitnami/java" \ + LD_LIBRARY_PATH="/opt/bitnami/elasticsearch/jdk/lib:/opt/bitnami/elasticsearch/jdk/lib/server:$LD_LIBRARY_PATH" + +EXPOSE 9200 9300 + +USER 1001 +ENTRYPOINT [ "/opt/bitnami/scripts/elasticsearch/entrypoint.sh" ] +CMD [ "/opt/bitnami/scripts/elasticsearch/run.sh" ] diff --git a/bitnami/elasticsearch/7/debian-12/docker-compose.yml b/bitnami/elasticsearch/7/debian-12/docker-compose.yml new file mode 100644 index 000000000000..df84adbab13a --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/docker-compose.yml @@ -0,0 +1,16 @@ +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 + +version: '2' + +services: + elasticsearch: + image: docker.io/bitnami/elasticsearch:7 + ports: + - '9200:9200' + - '9300:9300' + volumes: + - 'elasticsearch_data:/bitnami/elasticsearch/data' +volumes: + elasticsearch_data: + driver: local diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/.bitnami_components.json b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/.bitnami_components.json new file mode 100644 index 000000000000..61284bc4faa6 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/.bitnami_components.json @@ -0,0 +1,20 @@ +{ + "elasticsearch": { + "arch": "amd64", + "distro": "debian-12", + "type": "NAMI", + "version": "7.17.18-1" + }, + "java": { + "arch": "amd64", + "distro": "debian-12", + "type": "NAMI", + "version": "17.0.10-13-2" + }, + "yq": { + "arch": "amd64", + "distro": "debian-12", + "type": "NAMI", + "version": "4.41.1-0" + } +} \ No newline at end of file diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/licenses/licenses.txt b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/licenses/licenses.txt new file mode 100644 index 000000000000..76956b38e82c --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/licenses/licenses.txt @@ -0,0 +1,2 @@ +Bitnami containers ship with software bundles. You can find the licenses under: +/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libbitnami.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libbitnami.sh new file mode 100644 index 000000000000..3853c789b2ea --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libbitnami.sh @@ -0,0 +1,53 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami custom library + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Constants +BOLD='\033[1m' + +# Functions + +######################## +# Print the welcome page +# Globals: +# DISABLE_WELCOME_MESSAGE +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_welcome_page() { + if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then + if [[ -n "$BITNAMI_APP_NAME" ]]; then + print_image_welcome_page + fi + fi +} + +######################## +# Print the welcome page for a Bitnami Docker image +# Globals: +# BITNAMI_APP_NAME +# Arguments: +# None +# Returns: +# None +######################### +print_image_welcome_page() { + local github_url="https://github.com/bitnami/containers" + + info "" + info "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}" + info "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}" + info "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}" + info "" +} + diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfile.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfile.sh new file mode 100644 index 000000000000..63759c777f3b --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfile.sh @@ -0,0 +1,141 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing files + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libos.sh + +# Functions + +######################## +# Replace a regex-matching string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# $4 - use POSIX regex. 
Default: true +# Returns: +# None +######################### +replace_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + local posix_regex=${4:-true} + + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + if [[ $posix_regex = true ]]; then + result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + else + result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Replace a regex-matching multiline string in a file +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - substitute regex +# Returns: +# None +######################### +replace_in_file_multiline() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local substitute_regex="${3:?substitute regex is required}" + + local result + local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues + result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")" + echo "$result" > "$filename" +} + +######################## +# Remove a line in a file based on a regex +# Arguments: +# $1 - filename +# $2 - match regex +# $3 - use POSIX regex. 
Default: true +# Returns: +# None +######################### +remove_in_file() { + local filename="${1:?filename is required}" + local match_regex="${2:?match regex is required}" + local posix_regex=${3:-true} + local result + + # We should avoid using 'sed in-place' substitutions + # 1) They are not compatible with files mounted from ConfigMap(s) + # 2) We found incompatibility issues with Debian10 and "in-place" substitutions + if [[ $posix_regex = true ]]; then + result="$(sed -E "/$match_regex/d" "$filename")" + else + result="$(sed "/$match_regex/d" "$filename")" + fi + echo "$result" > "$filename" +} + +######################## +# Appends text after the last line matching a pattern +# Arguments: +# $1 - file +# $2 - match regex +# $3 - contents to add +# Returns: +# None +######################### +append_file_after_last_match() { + local file="${1:?missing file}" + local match_regex="${2:?missing pattern}" + local value="${3:?missing value}" + + # We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again + result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)" + echo "$result" > "$file" +} + +######################## +# Wait until certain entry is present in a log file +# Arguments: +# $1 - entry to look for +# $2 - log file +# $3 - max retries. Default: 12 +# $4 - sleep between retries (in seconds). Default: 5 +# Returns: +# Boolean +######################### +wait_for_log_entry() { + local -r entry="${1:-missing entry}" + local -r log_file="${2:-missing log file}" + local -r retries="${3:-12}" + local -r interval_time="${4:-5}" + local attempt=0 + + check_log_file_for_entry() { + if ! 
grep -qE "$entry" "$log_file"; then + debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})" + return 1 + fi + } + debug "Checking that ${log_file} log file contains entry \"${entry}\"" + if retry_while check_log_file_for_entry "$retries" "$interval_time"; then + debug "Found entry \"${entry}\" in ${log_file}" + true + else + error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries" + debug_execute cat "$log_file" + return 1 + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfs.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfs.sh new file mode 100644 index 000000000000..96b22f99710c --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libfs.sh @@ -0,0 +1,193 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for file system actions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Ensure a file/directory is owned (user and group) but the given user +# Arguments: +# $1 - filepath +# $2 - owner +# Returns: +# None +######################### +owned_by() { + local path="${1:?path is missing}" + local owner="${2:?owner is missing}" + local group="${3:-}" + + if [[ -n $group ]]; then + chown "$owner":"$group" "$path" + else + chown "$owner":"$owner" "$path" + fi +} + +######################## +# Ensure a directory exists and, optionally, is owned by the given user +# Arguments: +# $1 - directory +# $2 - owner +# Returns: +# None +######################### +ensure_dir_exists() { + local dir="${1:?directory is missing}" + local owner_user="${2:-}" + local owner_group="${3:-}" + + [ -d "${dir}" ] || mkdir -p "${dir}" + if [[ -n $owner_user ]]; then + owned_by "$dir" "$owner_user" "$owner_group" + fi +} + +######################## +# Checks whether a directory is empty or not +# arguments: +# $1 - directory +# returns: +# boolean +######################### +is_dir_empty() { + local -r path="${1:?missing directory}" + # Calculate real path in order to avoid issues with symlinks + local -r dir="$(realpath "$path")" + if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then + true + else + false + fi +} + +######################## +# Checks whether a mounted directory is empty or not +# arguments: +# $1 - directory +# returns: +# boolean +######################### +is_mounted_dir_empty() { + local dir="${1:?missing directory}" + + if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then + true + else + false + fi +} + +######################## +# Checks whether a file can be written to or not +# arguments: +# $1 - file +# returns: +# boolean +######################### +is_file_writable() { + local file="${1:?missing file}" + local dir + dir="$(dirname "$file")" + + if [[ (-f "$file" && -w "$file") || (! 
-f "$file" && -d "$dir" && -w "$dir") ]]; then + true + else + false + fi +} + +######################## +# Relativize a path +# arguments: +# $1 - path +# $2 - base +# returns: +# None +######################### +relativize() { + local -r path="${1:?missing path}" + local -r base="${2:?missing base}" + pushd "$base" >/dev/null || exit + realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||' + popd >/dev/null || exit +} + +######################## +# Configure permisions and ownership recursively +# Globals: +# None +# Arguments: +# $1 - paths (as a string). +# Flags: +# -f|--file-mode - mode for directories. +# -d|--dir-mode - mode for files. +# -u|--user - user +# -g|--group - group +# Returns: +# None +######################### +configure_permissions_ownership() { + local -r paths="${1:?paths is missing}" + local dir_mode="" + local file_mode="" + local user="" + local group="" + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -f | --file-mode) + shift + file_mode="${1:?missing mode for files}" + ;; + -d | --dir-mode) + shift + dir_mode="${1:?missing mode for directories}" + ;; + -u | --user) + shift + user="${1:?missing user}" + ;; + -g | --group) + shift + group="${1:?missing group}" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + read -r -a filepaths <<<"$paths" + for p in "${filepaths[@]}"; do + if [[ -e "$p" ]]; then + find -L "$p" -printf "" + if [[ -n $dir_mode ]]; then + find -L "$p" -type d ! -perm "$dir_mode" -print0 | xargs -r -0 chmod "$dir_mode" + fi + if [[ -n $file_mode ]]; then + find -L "$p" -type f ! 
-perm "$file_mode" -print0 | xargs -r -0 chmod "$file_mode" + fi + if [[ -n $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}:${group}" + elif [[ -n $user ]] && [[ -z $group ]]; then + find -L "$p" -print0 | xargs -r -0 chown "${user}" + elif [[ -z $user ]] && [[ -n $group ]]; then + find -L "$p" -print0 | xargs -r -0 chgrp "${group}" + fi + else + stderr_print "$p does not exist" + fi + done +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libhook.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libhook.sh new file mode 100644 index 000000000000..dadd06149e00 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libhook.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library to use for scripts expected to be used as Kubernetes lifecycle hooks + +# shellcheck disable=SC1091 + +# Load generic libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libos.sh + +# Override functions that log to stdout/stderr of the current process, so they print to process 1 +for function_to_override in stderr_print debug_execute; do + # Output is sent to output of process 1 and thus end up in the container log + # The hook output in general isn't saved + eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2" +done diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/liblog.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/liblog.sh new file mode 100644 index 000000000000..2a9e76a4d725 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/liblog.sh @@ -0,0 +1,114 @@ +#!/bin/bash +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 +# +# Library for logging functions + +# Constants +RESET='\033[0m' +RED='\033[38;5;1m' +GREEN='\033[38;5;2m' +YELLOW='\033[38;5;3m' +MAGENTA='\033[38;5;5m' +CYAN='\033[38;5;6m' + +# Functions + +######################## +# Print to STDERR +# Arguments: +# Message to print +# Returns: +# None +######################### +stderr_print() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_QUIET:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + printf "%b\\n" "${*}" >&2 + fi +} + +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +log() { + stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}" +} +######################## +# Log an 'info' message +# Arguments: +# Message to log +# Returns: +# None +######################### +info() { + log "${GREEN}INFO ${RESET} ==> ${*}" +} +######################## +# Log message +# Arguments: +# Message to log +# Returns: +# None +######################### +warn() { + log "${YELLOW}WARN ${RESET} ==> ${*}" +} +######################## +# Log an 'error' message +# Arguments: +# Message to log +# Returns: +# None +######################### +error() { + log "${RED}ERROR${RESET} ==> ${*}" +} +######################## +# Log a 'debug' message +# Globals: +# BITNAMI_DEBUG +# Arguments: +# None +# Returns: +# None +######################### +debug() { + # 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it + local bool="${BITNAMI_DEBUG:-false}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + log "${MAGENTA}DEBUG${RESET} ==> ${*}" + fi +} + 
+######################## +# Indent a string +# Arguments: +# $1 - string +# $2 - number of indentation characters (default: 4) +# $3 - indentation character (default: " ") +# Returns: +# None +######################### +indent() { + local string="${1:-}" + local num="${2:?missing num}" + local char="${3:-" "}" + # Build the indentation unit string + local indent_unit="" + for ((i = 0; i < num; i++)); do + indent_unit="${indent_unit}${char}" + done + # shellcheck disable=SC2001 + # Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions + echo "$string" | sed "s/^/${indent_unit}/" +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libnet.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libnet.sh new file mode 100644 index 000000000000..b47c69a56825 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libnet.sh @@ -0,0 +1,165 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for network functions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Resolve IP address for a host/domain (i.e. 
DNS lookup) +# Arguments: +# $1 - Hostname to resolve +# $2 - IP address version (v4, v6), leave empty for resolving to any version +# Returns: +# IP +######################### +dns_lookup() { + local host="${1:?host is missing}" + local ip_version="${2:-}" + getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1 +} + +######################### +# Wait for a hostname and return the IP +# Arguments: +# $1 - hostname +# $2 - number of retries +# $3 - seconds to wait between retries +# Returns: +# - IP address that corresponds to the hostname +######################### +wait_for_dns_lookup() { + local hostname="${1:?hostname is missing}" + local retries="${2:-5}" + local seconds="${3:-1}" + check_host() { + if [[ $(dns_lookup "$hostname") == "" ]]; then + false + else + true + fi + } + # Wait for the host to be ready + retry_while "check_host ${hostname}" "$retries" "$seconds" + dns_lookup "$hostname" +} + +######################## +# Get machine's IP +# Arguments: +# None +# Returns: +# Machine IP +######################### +get_machine_ip() { + local -a ip_addresses + local hostname + hostname="$(hostname)" + read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)" + if [[ "${#ip_addresses[@]}" -gt 1 ]]; then + warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}" + elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then + error "Could not find any IP address associated to hostname ${hostname}" + exit 1 + fi + echo "${ip_addresses[0]}" +} + +######################## +# Check if the provided argument is a resolved hostname +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_hostname_resolved() { + local -r host="${1:?missing value}" + if [[ -n "$(dns_lookup "$host")" ]]; then + true + else + false + fi +} + +######################## +# Parse URL +# Globals: +# None +# Arguments: +# $1 - uri - String +# $2 - component to obtain. 
Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String +# Returns: +# String +parse_uri() { + local uri="${1:?uri is missing}" + local component="${2:?component is missing}" + + # Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with + # additional sub-expressions to split authority into userinfo, host and port + # Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969) + local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?' + # || | ||| | | | | | | | | | + # |2 scheme | ||6 userinfo 7 host | 9 port | 11 rpath | 13 query | 15 fragment + # 1 scheme: | |5 userinfo@ 8 :... 10 path 12 ?... 14 #... + # | 4 authority + # 3 //... + local index=0 + case "$component" in + scheme) + index=2 + ;; + authority) + index=4 + ;; + userinfo) + index=6 + ;; + host) + index=7 + ;; + port) + index=9 + ;; + path) + index=10 + ;; + query) + index=13 + ;; + fragment) + index=14 + ;; + *) + stderr_print "unrecognized component $component" + return 1 + ;; + esac + [[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}" +} + +######################## +# Wait for a HTTP connection to succeed +# Globals: +# * +# Arguments: +# $1 - URL to wait for +# $2 - Maximum amount of retries (optional) +# $3 - Time between retries (optional) +# Returns: +# true if the HTTP connection succeeded, false otherwise +######################### +wait_for_http_connection() { + local url="${1:?missing url}" + local retries="${2:-}" + local sleep_time="${3:-}" + if ! 
retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then + error "Could not connect to ${url}" + return 1 + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libos.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libos.sh new file mode 100644 index 000000000000..c0500acee78d --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libos.sh @@ -0,0 +1,657 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for operating system actions + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libvalidations.sh + +# Functions + +######################## +# Check if an user exists in the system +# Arguments: +# $1 - user +# Returns: +# Boolean +######################### +user_exists() { + local user="${1:?user is missing}" + id "$user" >/dev/null 2>&1 +} + +######################## +# Check if a group exists in the system +# Arguments: +# $1 - group +# Returns: +# Boolean +######################### +group_exists() { + local group="${1:?group is missing}" + getent group "$group" >/dev/null 2>&1 +} + +######################## +# Create a group in the system if it does not exist already +# Arguments: +# $1 - group +# Flags: +# -i|--gid - the ID for the new group +# -s|--system - Whether to create new user as system user (uid <= 999) +# Returns: +# None +######################### +ensure_group_exists() { + local group="${1:?group is missing}" + local gid="" + local is_system_user=false + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -i | --gid) + shift + gid="${1:?missing gid}" + ;; + -s | --system) + is_system_user=true + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + if ! 
group_exists "$group"; then + local -a args=("$group") + if [[ -n "$gid" ]]; then + if group_exists "$gid"; then + error "The GID $gid is already in use." >&2 + return 1 + fi + args+=("--gid" "$gid") + fi + $is_system_user && args+=("--system") + groupadd "${args[@]}" >/dev/null 2>&1 + fi +} + +######################## +# Create an user in the system if it does not exist already +# Arguments: +# $1 - user +# Flags: +# -i|--uid - the ID for the new user +# -g|--group - the group the new user should belong to +# -a|--append-groups - comma-separated list of supplemental groups to append to the new user +# -h|--home - the home directory for the new user +# -s|--system - whether to create new user as system user (uid <= 999) +# Returns: +# None +######################### +ensure_user_exists() { + local user="${1:?user is missing}" + local uid="" + local group="" + local append_groups="" + local home="" + local is_system_user=false + + # Validate arguments + shift 1 + while [ "$#" -gt 0 ]; do + case "$1" in + -i | --uid) + shift + uid="${1:?missing uid}" + ;; + -g | --group) + shift + group="${1:?missing group}" + ;; + -a | --append-groups) + shift + append_groups="${1:?missing append_groups}" + ;; + -h | --home) + shift + home="${1:?missing home directory}" + ;; + -s | --system) + is_system_user=true + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + + if ! user_exists "$user"; then + local -a user_args=("-N" "$user") + if [[ -n "$uid" ]]; then + if user_exists "$uid"; then + error "The UID $uid is already in use." 
+ return 1 + fi + user_args+=("--uid" "$uid") + else + $is_system_user && user_args+=("--system") + fi + useradd "${user_args[@]}" >/dev/null 2>&1 + fi + + if [[ -n "$group" ]]; then + local -a group_args=("$group") + $is_system_user && group_args+=("--system") + ensure_group_exists "${group_args[@]}" + usermod -g "$group" "$user" >/dev/null 2>&1 + fi + + if [[ -n "$append_groups" ]]; then + local -a groups + read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")" + for group in "${groups[@]}"; do + ensure_group_exists "$group" + usermod -aG "$group" "$user" >/dev/null 2>&1 + done + fi + + if [[ -n "$home" ]]; then + mkdir -p "$home" + usermod -d "$home" "$user" >/dev/null 2>&1 + configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group" + fi +} + +######################## +# Check if the script is currently running as root +# Arguments: +# $1 - user +# $2 - group +# Returns: +# Boolean +######################### +am_i_root() { + if [[ "$(id -u)" = "0" ]]; then + true + else + false + fi +} + +######################## +# Print OS metadata +# Arguments: +# $1 - Flag name +# Flags: +# --id - Distro ID +# --version - Distro version +# --branch - Distro branch +# --codename - Distro codename +# --name - Distro name +# --pretty-name - Distro pretty name +# Returns: +# String +######################### +get_os_metadata() { + local -r flag_name="${1:?missing flag}" + # Helper function + get_os_release_metadata() { + local -r env_name="${1:?missing environment variable name}" + ( + . 
/etc/os-release + echo "${!env_name}" + ) + } + case "$flag_name" in + --id) + get_os_release_metadata ID + ;; + --version) + get_os_release_metadata VERSION_ID + ;; + --branch) + get_os_release_metadata VERSION_ID | sed 's/\..*//' + ;; + --codename) + get_os_release_metadata VERSION_CODENAME + ;; + --name) + get_os_release_metadata NAME + ;; + --pretty-name) + get_os_release_metadata PRETTY_NAME + ;; + *) + error "Unknown flag ${flag_name}" + return 1 + ;; + esac +} + +######################## +# Get total memory available +# Arguments: +# None +# Returns: +# Memory in bytes +######################### +get_total_memory() { + echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024)) +} + +######################## +# Get machine size depending on specified memory +# Globals: +# None +# Arguments: +# None +# Flags: +# --memory - memory size (optional) +# Returns: +# Detected instance size +######################### +get_machine_size() { + local memory="" + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + --memory) + shift + memory="${1:?missing memory}" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + if [[ -z "$memory" ]]; then + debug "Memory was not specified, detecting available memory automatically" + memory="$(get_total_memory)" + fi + sanitized_memory=$(convert_to_mb "$memory") + if [[ "$sanitized_memory" -gt 26000 ]]; then + echo 2xlarge + elif [[ "$sanitized_memory" -gt 13000 ]]; then + echo xlarge + elif [[ "$sanitized_memory" -gt 6000 ]]; then + echo large + elif [[ "$sanitized_memory" -gt 3000 ]]; then + echo medium + elif [[ "$sanitized_memory" -gt 1500 ]]; then + echo small + else + echo micro + fi +} + +######################## +# Get machine size depending on specified memory +# Globals: +# None +# Arguments: +# $1 - memory size (optional) +# Returns: +# Detected instance size +######################### +get_supported_machine_sizes() { + echo micro small medium large xlarge 2xlarge 
+} + +######################## +# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048) +# Globals: +# None +# Arguments: +# $1 - memory size +# Returns: +# Result of the conversion +######################### +convert_to_mb() { + local amount="${1:-}" + if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then + size="${BASH_REMATCH[1]}" + unit="${BASH_REMATCH[2]}" + if [[ "$unit" = "g" || "$unit" = "G" ]]; then + amount="$((size * 1024))" + else + amount="$size" + fi + fi + echo "$amount" +} + +######################### +# Redirects output to /dev/null if debug mode is disabled +# Globals: +# BITNAMI_DEBUG +# Arguments: +# $@ - Command to execute +# Returns: +# None +######################### +debug_execute() { + if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then + "$@" + else + "$@" >/dev/null 2>&1 + fi +} + +######################## +# Retries a command a given number of times +# Arguments: +# $1 - cmd (as a string) +# $2 - max retries. Default: 12 +# $3 - sleep between retries (in seconds). 
Default: 5 +# Returns: +# Boolean +######################### +retry_while() { + local cmd="${1:?cmd is missing}" + local retries="${2:-12}" + local sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<<"$cmd" + for ((i = 1; i <= retries; i += 1)); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value +} + +######################## +# Generate a random string +# Arguments: +# -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii +# -c|--count - Number of characters, defaults to 32 +# Arguments: +# None +# Returns: +# None +# Returns: +# String +######################### +generate_random_string() { + local type="ascii" + local count="32" + local filter + local result + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + -t | --type) + shift + type="$1" + ;; + -c | --count) + shift + count="$1" + ;; + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + # Validate type + case "$type" in + ascii) + filter="[:print:]" + ;; + numeric) + filter="0-9" + ;; + alphanumeric) + filter="a-zA-Z0-9" + ;; + alphanumeric+special|special+alphanumeric) + # Limit variety of special characters, so there is a higher chance of containing more alphanumeric characters + # Special characters are harder to write, and it could impact the overall UX if most passwords are too complex + filter='a-zA-Z0-9:@.,/+!=' + ;; + *) + echo "Invalid type ${type}" >&2 + return 1 + ;; + esac + # Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size + # Note there is a very small chance of strings starting with EOL character + # Therefore, the higher amount of lines read, this will happen less frequently + result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")" + echo "$result" +} + +######################## +# Create md5 hash from a string +# Arguments: +# $1 - string +# Returns: +# md5 hash - string 
+######################### +generate_md5_hash() { + local -r str="${1:?missing input string}" + echo -n "$str" | md5sum | awk '{print $1}' +} + +######################## +# Create sha1 hash from a string +# Arguments: +# $1 - string +# $2 - algorithm - 1 (default), 224, 256, 384, 512 +# Returns: +# sha1 hash - string +######################### +generate_sha_hash() { + local -r str="${1:?missing input string}" + local -r algorithm="${2:-1}" + echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}' +} + +######################## +# Converts a string to its hexadecimal representation +# Arguments: +# $1 - string +# Returns: +# hexadecimal representation of the string +######################### +convert_to_hex() { + local -r str=${1:?missing input string} + local -i iterator + local char + for ((iterator = 0; iterator < ${#str}; iterator++)); do + char=${str:iterator:1} + printf '%x' "'${char}" + done +} + +######################## +# Get boot time +# Globals: +# None +# Arguments: +# None +# Returns: +# Boot time metadata +######################### +get_boot_time() { + stat /proc --format=%Y +} + +######################## +# Get machine ID +# Globals: +# None +# Arguments: +# None +# Returns: +# Machine ID +######################### +get_machine_id() { + local machine_id + if [[ -f /etc/machine-id ]]; then + machine_id="$(cat /etc/machine-id)" + fi + if [[ -z "$machine_id" ]]; then + # Fallback to the boot-time, which will at least ensure a unique ID in the current session + machine_id="$(get_boot_time)" + fi + echo "$machine_id" +} + +######################## +# Get the root partition's disk device ID (e.g. 
/dev/sda1) +# Globals: +# None +# Arguments: +# None +# Returns: +# Root partition disk ID +######################### +get_disk_device_id() { + local device_id="" + if grep -q ^/dev /proc/mounts; then + device_id="$(grep ^/dev /proc/mounts | awk '$2 == "/" { print $1 }' | tail -1)" + fi + # If it could not be autodetected, fallback to /dev/sda1 as a default + if [[ -z "$device_id" || ! -b "$device_id" ]]; then + device_id="/dev/sda1" + fi + echo "$device_id" +} + +######################## +# Get the root disk device ID (e.g. /dev/sda) +# Globals: +# None +# Arguments: +# None +# Returns: +# Root disk ID +######################### +get_root_disk_device_id() { + get_disk_device_id | sed -E 's/p?[0-9]+$//' +} + +######################## +# Get the root disk size in bytes +# Globals: +# None +# Arguments: +# None +# Returns: +# Root disk size in bytes +######################### +get_root_disk_size() { + fdisk -l "$(get_root_disk_device_id)" | grep 'Disk.*bytes' | sed -E 's/.*, ([0-9]+) bytes,.*/\1/' || true +} + +######################## +# Run command as a specific user and group (optional) +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Returns: +# Exit code of the specified command +######################### +run_as_user() { + run_chroot "$@" +} + +######################## +# Execute command as a specific user and group (optional), +# replacing the current process image +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Returns: +# Exit code of the specified command +######################### +exec_as_user() { + run_chroot --replace-process "$@" +} + +######################## +# Run a command using chroot +# Arguments: +# $1 - USER(:GROUP) to switch to +# $2..$n - command to execute +# Flags: +# -r | --replace-process - Replace the current process image (optional) +# Returns: +# Exit code of the specified command +######################### +run_chroot() { + local userspec + local user + local homedir + 
local replace=false + local -r cwd="$(pwd)" + + # Parse and validate flags + while [[ "$#" -gt 0 ]]; do + case "$1" in + -r | --replace-process) + replace=true + ;; + --) + shift + break + ;; + -*) + stderr_print "unrecognized flag $1" + return 1 + ;; + *) + break + ;; + esac + shift + done + + # Parse and validate arguments + if [[ "$#" -lt 2 ]]; then + echo "expected at least 2 arguments" + return 1 + else + userspec=$1 + shift + + # userspec can optionally include the group, so we parse the user + user=$(echo "$userspec" | cut -d':' -f1) + fi + + if ! am_i_root; then + error "Could not switch to '${userspec}': Operation not permitted" + return 1 + fi + + # Get the HOME directory for the user to switch, as chroot does + # not properly update this env and some scripts rely on it + homedir=$(eval echo "~${user}") + if [[ ! -d $homedir ]]; then + homedir="${HOME:-/}" + fi + + # Obtaining value for "$@" indirectly in order to properly support shell parameter expansion + if [[ "$replace" = true ]]; then + exec chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@" + else + chroot --userspec="$userspec" / bash -c "cd ${cwd}; export HOME=${homedir}; exec \"\$@\"" -- "$@" + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libpersistence.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libpersistence.sh new file mode 100644 index 000000000000..af6af64d6dd0 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libpersistence.sh @@ -0,0 +1,124 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami persistence library +# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libos.sh +. /opt/bitnami/scripts/liblog.sh +. 
/opt/bitnami/scripts/libversion.sh + +# Functions + +######################## +# Persist an application directory +# Globals: +# BITNAMI_ROOT_DIR +# BITNAMI_VOLUME_DIR +# Arguments: +# $1 - App folder name +# $2 - List of app files to persist +# Returns: +# true if all steps succeeded, false otherwise +######################### +persist_app() { + local -r app="${1:?missing app}" + local -a files_to_restore + read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")" + local -r install_dir="${BITNAMI_ROOT_DIR}/${app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + # Persist the individual files + if [[ "${#files_to_persist[@]}" -le 0 ]]; then + warn "No files are configured to be persisted" + return + fi + pushd "$install_dir" >/dev/null || exit + local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder + local -r tmp_file="/tmp/perms.acl" + for file_to_persist in "${files_to_persist[@]}"; do + if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then + error "Cannot persist '${file_to_persist}' because it does not exist" + return 1 + fi + file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")" + file_to_persist_destination="${persist_dir}/${file_to_persist_relative}" + file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")" + # Get original permissions for existing files, which will be applied later + # Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume + getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file" + # Copy directories to the volume + ensure_dir_exists "$file_to_persist_destination_folder" + cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder" + # Restore permissions + pushd "$persist_dir" >/dev/null || exit + if am_i_root; then + setfacl --restore="$tmp_file" + else + # When running as non-root, don't change ownership + setfacl 
--restore=<(grep -E -v '^# (owner|group):' "$tmp_file") + fi + popd >/dev/null || exit + done + popd >/dev/null || exit + rm -f "$tmp_file" + # Install the persisted files into the installation directory, via symlinks + restore_persisted_app "$@" +} + +######################## +# Restore a persisted application directory +# Globals: +# BITNAMI_ROOT_DIR +# BITNAMI_VOLUME_DIR +# FORCE_MAJOR_UPGRADE +# Arguments: +# $1 - App folder name +# $2 - List of app files to restore +# Returns: +# true if all steps succeeded, false otherwise +######################### +restore_persisted_app() { + local -r app="${1:?missing app}" + local -a files_to_restore + read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")" + local -r install_dir="${BITNAMI_ROOT_DIR}/${app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + # Restore the individual persisted files + if [[ "${#files_to_restore[@]}" -le 0 ]]; then + warn "No persisted files are configured to be restored" + return + fi + local file_to_restore_relative file_to_restore_origin file_to_restore_destination + for file_to_restore in "${files_to_restore[@]}"; do + file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")" + # We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed + file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")" + file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")" + rm -rf "$file_to_restore_origin" + ln -sfn "$file_to_restore_destination" "$file_to_restore_origin" + done +} + +######################## +# Check if an application directory was already persisted +# Globals: +# BITNAMI_VOLUME_DIR +# Arguments: +# $1 - App folder name +# Returns: +# true if all steps succeeded, false otherwise +######################### +is_app_initialized() { + local -r app="${1:?missing app}" + local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}" + if ! 
is_mounted_dir_empty "$persist_dir"; then + true + else + false + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libservice.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libservice.sh new file mode 100644 index 000000000000..107f54e6b5c9 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libservice.sh @@ -0,0 +1,496 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing services + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libvalidations.sh +. /opt/bitnami/scripts/liblog.sh + +# Functions + +######################## +# Read the provided pid file and returns a PID +# Arguments: +# $1 - Pid file +# Returns: +# PID +######################### +get_pid_from_file() { + local pid_file="${1:?pid file is missing}" + + if [[ -f "$pid_file" ]]; then + if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then + echo "$(< "$pid_file")" + fi + fi +} + +######################## +# Check if a provided PID corresponds to a running service +# Arguments: +# $1 - PID +# Returns: +# Boolean +######################### +is_service_running() { + local pid="${1:?pid is missing}" + + kill -0 "$pid" 2>/dev/null +} + +######################## +# Stop a service by sending a termination signal to its pid +# Arguments: +# $1 - Pid file +# $2 - Signal number (optional) +# Returns: +# None +######################### +stop_service_using_pid() { + local pid_file="${1:?pid file is missing}" + local signal="${2:-}" + local pid + + pid="$(get_pid_from_file "$pid_file")" + [[ -z "$pid" ]] || ! 
is_service_running "$pid" && return + + if [[ -n "$signal" ]]; then + kill "-${signal}" "$pid" + else + kill "$pid" + fi + + local counter=10 + while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do + sleep 1 + counter=$((counter - 1)) + done +} + +######################## +# Start cron daemon +# Arguments: +# None +# Returns: +# true if started correctly, false otherwise +######################### +cron_start() { + if [[ -x "/usr/sbin/cron" ]]; then + /usr/sbin/cron + elif [[ -x "/usr/sbin/crond" ]]; then + /usr/sbin/crond + else + false + fi +} + +######################## +# Generate a cron configuration file for a given service +# Arguments: +# $1 - Service name +# $2 - Command +# Flags: +# --run-as - User to run as (default: root) +# --schedule - Cron schedule configuration (default: * * * * *) +# Returns: +# None +######################### +generate_cron_conf() { + local service_name="${1:?service name is missing}" + local cmd="${2:?command is missing}" + local run_as="root" + local schedule="* * * * *" + local clean="true" + + # Parse optional CLI flags + shift 2 + while [[ "$#" -gt 0 ]]; do + case "$1" in + --run-as) + shift + run_as="$1" + ;; + --schedule) + shift + schedule="$1" + ;; + --no-clean) + clean="false" + ;; + *) + echo "Invalid command line flag ${1}" >&2 + return 1 + ;; + esac + shift + done + + mkdir -p /etc/cron.d + if "$clean"; then + cat > "/etc/cron.d/${service_name}" <> /etc/cron.d/"$service_name" + fi +} + +######################## +# Remove a cron configuration file for a given service +# Arguments: +# $1 - Service name +# Returns: +# None +######################### +remove_cron_conf() { + local service_name="${1:?service name is missing}" + local cron_conf_dir="/etc/monit/conf.d" + rm -f "${cron_conf_dir}/${service_name}" +} + +######################## +# Generate a monit configuration file for a given service +# Arguments: +# $1 - Service name +# $2 - Pid file +# $3 - Start command +# $4 - Stop command +# Flags: +# --disable - 
Whether to disable the monit configuration +# Returns: +# None +######################### +generate_monit_conf() { + local service_name="${1:?service name is missing}" + local pid_file="${2:?pid file is missing}" + local start_command="${3:?start command is missing}" + local stop_command="${4:?stop command is missing}" + local monit_conf_dir="/etc/monit/conf.d" + local disabled="no" + + # Parse optional CLI flags + shift 4 + while [[ "$#" -gt 0 ]]; do + case "$1" in + --disable) + disabled="yes" + ;; + *) + echo "Invalid command line flag ${1}" >&2 + return 1 + ;; + esac + shift + done + + is_boolean_yes "$disabled" && conf_suffix=".disabled" + mkdir -p "$monit_conf_dir" + cat > "${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <&2 + return 1 + ;; + esac + shift + done + + mkdir -p "$logrotate_conf_dir" + cat < "${logrotate_conf_dir}/${service_name}" +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +${log_path} { + ${period} + rotate ${rotations} + dateext + compress + copytruncate + missingok +$(indent "$extra" 2) +} +EOF +} + +######################## +# Remove a logrotate configuration file +# Arguments: +# $1 - Service name +# Returns: +# None +######################### +remove_logrotate_conf() { + local service_name="${1:?service name is missing}" + local logrotate_conf_dir="/etc/logrotate.d" + rm -f "${logrotate_conf_dir}/${service_name}" +} + +######################## +# Generate a Systemd configuration file +# Arguments: +# $1 - Service name +# Flags: +# --custom-service-content - Custom content to add to the [service] block +# --environment - Environment variable to define (multiple --environment options may be passed) +# --environment-file - Text file with environment variables (multiple --environment-file options may be passed) +# --exec-start - Start command (required) +# --exec-start-pre - Pre-start command (optional) +# --exec-start-post - Post-start command (optional) +# --exec-stop - Stop command (optional) +# --exec-reload 
- Reload command (optional) +# --group - System group to start the service with +# --name - Service full name (e.g. Apache HTTP Server, defaults to $1) +# --restart - When to restart the Systemd service after being stopped (defaults to always) +# --pid-file - Service PID file +# --standard-output - File where to print stdout output +# --standard-error - File where to print stderr output +# --success-exit-status - Exit code that indicates a successful shutdown +# --type - Systemd unit type (defaults to forking) +# --user - System user to start the service with +# --working-directory - Working directory at which to start the service +# Returns: +# None +######################### +generate_systemd_conf() { + local -r service_name="${1:?service name is missing}" + local -r systemd_units_dir="/etc/systemd/system" + local -r service_file="${systemd_units_dir}/bitnami.${service_name}.service" + # Default values + local name="$service_name" + local type="forking" + local user="" + local group="" + local environment="" + local environment_file="" + local exec_start="" + local exec_start_pre="" + local exec_start_post="" + local exec_stop="" + local exec_reload="" + local restart="always" + local pid_file="" + local standard_output="journal" + local standard_error="" + local limits_content="" + local success_exit_status="" + local custom_service_content="" + local working_directory="" + # Parse CLI flags + shift + while [[ "$#" -gt 0 ]]; do + case "$1" in + --name \ + | --type \ + | --user \ + | --group \ + | --exec-start \ + | --exec-stop \ + | --exec-reload \ + | --restart \ + | --pid-file \ + | --standard-output \ + | --standard-error \ + | --success-exit-status \ + | --custom-service-content \ + | --working-directory \ + ) + var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")" + shift + declare "$var_name"="${1:?"${var_name} value is missing"}" + ;; + --limit-*) + [[ -n "$limits_content" ]] && limits_content+=$'\n' + var_name="${1//--limit-}" + shift + 
limits_content+="Limit${var_name^^}=${1:?"--limit-${var_name} value is missing"}" + ;; + --exec-start-pre) + shift + [[ -n "$exec_start_pre" ]] && exec_start_pre+=$'\n' + exec_start_pre+="ExecStartPre=${1:?"--exec-start-pre value is missing"}" + ;; + --exec-start-post) + shift + [[ -n "$exec_start_post" ]] && exec_start_post+=$'\n' + exec_start_post+="ExecStartPost=${1:?"--exec-start-post value is missing"}" + ;; + --environment) + shift + # It is possible to add multiple environment lines + [[ -n "$environment" ]] && environment+=$'\n' + environment+="Environment=${1:?"--environment value is missing"}" + ;; + --environment-file) + shift + # It is possible to add multiple environment-file lines + [[ -n "$environment_file" ]] && environment_file+=$'\n' + environment_file+="EnvironmentFile=${1:?"--environment-file value is missing"}" + ;; + *) + echo "Invalid command line flag ${1}" >&2 + return 1 + ;; + esac + shift + done + # Validate inputs + local error="no" + if [[ -z "$exec_start" ]]; then + error "The --exec-start option is required" + error="yes" + fi + if [[ "$error" != "no" ]]; then + return 1 + fi + # Generate the Systemd unit + cat > "$service_file" <> "$service_file" <<< "WorkingDirectory=${working_directory}" + fi + if [[ -n "$exec_start_pre" ]]; then + # This variable may contain multiple ExecStartPre= directives + cat >> "$service_file" <<< "$exec_start_pre" + fi + if [[ -n "$exec_start" ]]; then + cat >> "$service_file" <<< "ExecStart=${exec_start}" + fi + if [[ -n "$exec_start_post" ]]; then + # This variable may contain multiple ExecStartPost= directives + cat >> "$service_file" <<< "$exec_start_post" + fi + # Optional stop and reload commands + if [[ -n "$exec_stop" ]]; then + cat >> "$service_file" <<< "ExecStop=${exec_stop}" + fi + if [[ -n "$exec_reload" ]]; then + cat >> "$service_file" <<< "ExecReload=${exec_reload}" + fi + # User and group + if [[ -n "$user" ]]; then + cat >> "$service_file" <<< "User=${user}" + fi + if [[ -n "$group" ]]; 
then + cat >> "$service_file" <<< "Group=${group}" + fi + # PID file allows to determine if the main process is running properly (for Restart=always) + if [[ -n "$pid_file" ]]; then + cat >> "$service_file" <<< "PIDFile=${pid_file}" + fi + if [[ -n "$restart" ]]; then + cat >> "$service_file" <<< "Restart=${restart}" + fi + # Environment flags + if [[ -n "$environment" ]]; then + # This variable may contain multiple Environment= directives + cat >> "$service_file" <<< "$environment" + fi + if [[ -n "$environment_file" ]]; then + # This variable may contain multiple EnvironmentFile= directives + cat >> "$service_file" <<< "$environment_file" + fi + # Logging + if [[ -n "$standard_output" ]]; then + cat >> "$service_file" <<< "StandardOutput=${standard_output}" + fi + if [[ -n "$standard_error" ]]; then + cat >> "$service_file" <<< "StandardError=${standard_error}" + fi + if [[ -n "$custom_service_content" ]]; then + # This variable may contain multiple miscellaneous directives + cat >> "$service_file" <<< "$custom_service_content" + fi + if [[ -n "$success_exit_status" ]]; then + cat >> "$service_file" <> "$service_file" <> "$service_file" <> "$service_file" <= 0 )); then + true + else + false + fi +} + +######################## +# Check if the provided argument is a boolean or is the string 'yes/true' +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_boolean_yes() { + local -r bool="${1:-}" + # comparison is performed without regard to the case of alphabetic characters + shopt -s nocasematch + if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then + true + else + false + fi +} + +######################## +# Check if the provided argument is a boolean yes/no value +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_yes_no_value() { + local -r bool="${1:-}" + if [[ "$bool" =~ ^(yes|no)$ ]]; then + true + else + false + fi +} + +######################## +# Check if the provided argument is 
a boolean true/false value +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_true_false_value() { + local -r bool="${1:-}" + if [[ "$bool" =~ ^(true|false)$ ]]; then + true + else + false + fi +} + +######################## +# Check if the provided argument is a boolean 1/0 value +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_1_0_value() { + local -r bool="${1:-}" + if [[ "$bool" =~ ^[10]$ ]]; then + true + else + false + fi +} + +######################## +# Check if the provided argument is an empty string or not defined +# Arguments: +# $1 - Value to check +# Returns: +# Boolean +######################### +is_empty_value() { + local -r val="${1:-}" + if [[ -z "$val" ]]; then + true + else + false + fi +} + +######################## +# Validate if the provided argument is a valid port +# Arguments: +# $1 - Port to validate +# Returns: +# Boolean and error message +######################### +validate_port() { + local value + local unprivileged=0 + + # Parse flags + while [[ "$#" -gt 0 ]]; do + case "$1" in + -unprivileged) + unprivileged=1 + ;; + --) + shift + break + ;; + -*) + stderr_print "unrecognized flag $1" + return 1 + ;; + *) + break + ;; + esac + shift + done + + if [[ "$#" -gt 1 ]]; then + echo "too many arguments provided" + return 2 + elif [[ "$#" -eq 0 ]]; then + stderr_print "missing port argument" + return 1 + else + value=$1 + fi + + if [[ -z "$value" ]]; then + echo "the value is empty" + return 1 + else + if ! 
is_int "$value"; then + echo "value is not an integer" + return 2 + elif [[ "$value" -lt 0 ]]; then + echo "negative value provided" + return 2 + elif [[ "$value" -gt 65535 ]]; then + echo "requested port is greater than 65535" + return 2 + elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then + echo "privileged port requested" + return 3 + fi + fi +} + +######################## +# Validate if the provided argument is a valid IPv6 address +# Arguments: +# $1 - IP to validate +# Returns: +# Boolean +######################### +validate_ipv6() { + local ip="${1:?ip is missing}" + local stat=1 + local full_address_regex='^([0-9a-fA-F]{1,4}:){7}[0-9a-fA-F]{1,4}$' + local short_address_regex='^((([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}){0,6}::(([0-9a-fA-F]{1,4}:){0,6}[0-9a-fA-F]{1,4}){0,6})$' + + if [[ $ip =~ $full_address_regex || $ip =~ $short_address_regex || $ip == "::" ]]; then + stat=0 + fi + return $stat +} + +######################## +# Validate if the provided argument is a valid IPv4 address +# Arguments: +# $1 - IP to validate +# Returns: +# Boolean +######################### +validate_ipv4() { + local ip="${1:?ip is missing}" + local stat=1 + + if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then + read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")" + [[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \ + && ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]] + stat=$? 
+ fi + return $stat +} + +######################## +# Validate if the provided argument is a valid IPv4 or IPv6 address +# Arguments: +# $1 - IP to validate +# Returns: +# Boolean +######################### +validate_ip() { + local ip="${1:?ip is missing}" + local stat=1 + + if validate_ipv4 "$ip"; then + stat=0 + else + stat=$(validate_ipv6 "$ip") + fi + return $stat +} + +######################## +# Validate a string format +# Arguments: +# $1 - String to validate +# Returns: +# Boolean +######################### +validate_string() { + local string + local min_length=-1 + local max_length=-1 + + # Parse flags + while [ "$#" -gt 0 ]; do + case "$1" in + -min-length) + shift + min_length=${1:-} + ;; + -max-length) + shift + max_length=${1:-} + ;; + --) + shift + break + ;; + -*) + stderr_print "unrecognized flag $1" + return 1 + ;; + *) + break + ;; + esac + shift + done + + if [ "$#" -gt 1 ]; then + stderr_print "too many arguments provided" + return 2 + elif [ "$#" -eq 0 ]; then + stderr_print "missing string" + return 1 + else + string=$1 + fi + + if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then + echo "string length is less than $min_length" + return 1 + fi + if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then + echo "string length is great than $max_length" + return 1 + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libversion.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libversion.sh new file mode 100644 index 000000000000..6ca71ac7bdbb --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libversion.sh @@ -0,0 +1,51 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Library for managing versions strings + +# shellcheck disable=SC1091 + +# Load Generic Libraries +. 
/opt/bitnami/scripts/liblog.sh + +# Functions +######################## +# Gets semantic version +# Arguments: +# $1 - version: string to extract major.minor.patch +# $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch +# Returns: +# array with the major, minor and release +######################### +get_sematic_version () { + local version="${1:?version is required}" + local section="${2:?section is required}" + local -a version_sections + + #Regex to parse versions: x.y.z + local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?' + + if [[ "$version" =~ $regex ]]; then + local i=1 + local j=1 + local n=${#BASH_REMATCH[*]} + + while [[ $i -lt $n ]]; do + if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then + version_sections[j]="${BASH_REMATCH[$i]}" + ((j++)) + fi + ((i++)) + done + + local number_regex='^[0-9]+$' + if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then + echo "${version_sections[$section]}" + return + else + stderr_print "Section allowed values are: 1, 2, and 3" + return 1 + fi + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libwebserver.sh b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libwebserver.sh new file mode 100644 index 000000000000..8023f9b0549a --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/opt/bitnami/scripts/libwebserver.sh @@ -0,0 +1,476 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami web server handler library + +# shellcheck disable=SC1090,SC1091 + +# Load generic libraries +. 
/opt/bitnami/scripts/liblog.sh + +######################## +# Execute a command (or list of commands) with the web server environment and library loaded +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_execute() { + local -r web_server="${1:?missing web server}" + shift + # Run program in sub-shell to avoid web server environment getting loaded when not necessary + ( + . "/opt/bitnami/scripts/lib${web_server}.sh" + . "/opt/bitnami/scripts/${web_server}-env.sh" + "$@" + ) +} + +######################## +# Prints the list of enabled web servers +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_list() { + local -r -a supported_web_servers=(apache nginx) + local -a existing_web_servers=() + for web_server in "${supported_web_servers[@]}"; do + [[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server") + done + echo "${existing_web_servers[@]:-}" +} + +######################## +# Prints the currently-enabled web server type (only one, in order of preference) +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_type() { + local -a web_servers + read -r -a web_servers <<< "$(web_server_list)" + echo "${web_servers[0]:-}" +} + +######################## +# Validate that a supported web server is configured +# Globals: +# None +# Arguments: +# None +# Returns: +# None +######################### +web_server_validate() { + local error_code=0 + local supported_web_servers=("apache" "nginx") + + # Auxiliary functions + print_validation_error() { + error "$1" + error_code=1 + } + + if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then + print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}" + elif ! 
web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then + print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable." + fi + + return "$error_code" +} + +######################## +# Check whether the web server is running +# Globals: +# * +# Arguments: +# None +# Returns: +# true if the web server is running, false otherwise +######################### +is_web_server_running() { + "is_$(web_server_type)_running" +} + +######################## +# Start web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_start() { + info "Starting $(web_server_type) in background" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl start "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh" + fi +} + +######################## +# Stop web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_stop() { + info "Stopping $(web_server_type)" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl stop "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh" + fi +} + +######################## +# Restart web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_restart() { + info "Restarting $(web_server_type)" + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl restart "bitnami.$(web_server_type).service" + else + "${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh" + fi +} + +######################## +# Reload web server +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_reload() { + if [[ "${BITNAMI_SERVICE_MANAGER:-}" = "systemd" ]]; then + systemctl reload "bitnami.$(web_server_type).service" + else + 
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh" + fi +} + +######################## +# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --type - Application type, which has an effect on which configuration template to use +# --hosts - Host listen addresses +# --server-name - Server name +# --server-aliases - Server aliases +# --allow-remote-connections - Whether to allow remote connections or to require local connections +# --disable - Whether to render server configurations with a .disabled prefix +# --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix +# --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix +# --http-port - HTTP port number +# --https-port - HTTPS port number +# --document-root - Path to document root directory +# Apache-specific flags: +# --apache-additional-configuration - Additional vhost configuration (no default) +# --apache-additional-http-configuration - Additional HTTP vhost configuration (no default) +# --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default) +# --apache-before-vhost-configuration - Configuration to add before the directive (no default) +# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined) +# --apache-extra-directory-configuration - Extra configuration for the document root directory +# --apache-proxy-address - Address where to proxy requests +# --apache-proxy-configuration - Extra configuration for the proxy +# --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost +# --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost +# --apache-move-htaccess - Move .htaccess files to a 
common place so they can be loaded during Apache startup (only allowed when type is not defined) +# NGINX-specific flags: +# --nginx-additional-configuration - Additional server block configuration (no default) +# --nginx-external-configuration - Configuration external to server block (no default) +# Returns: +# true if the configuration was enabled, false otherwise +######################## +ensure_web_server_app_configuration_exists() { + local app="${1:?missing app}" + shift + local -a apache_args nginx_args web_servers args_var + apache_args=("$app") + nginx_args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --disable \ + | --disable-http \ + | --disable-https \ + ) + apache_args+=("$1") + nginx_args+=("$1") + ;; + --hosts \ + | --server-name \ + | --server-aliases \ + | --type \ + | --allow-remote-connections \ + | --http-port \ + | --https-port \ + | --document-root \ + ) + apache_args+=("$1" "${2:?missing value}") + nginx_args+=("$1" "${2:?missing value}") + shift + ;; + + # Specific Apache flags + --apache-additional-configuration \ + | --apache-additional-http-configuration \ + | --apache-additional-https-configuration \ + | --apache-before-vhost-configuration \ + | --apache-allow-override \ + | --apache-extra-directory-configuration \ + | --apache-proxy-address \ + | --apache-proxy-configuration \ + | --apache-proxy-http-configuration \ + | --apache-proxy-https-configuration \ + | --apache-move-htaccess \ + ) + apache_args+=("${1//apache-/}" "${2:?missing value}") + shift + ;; + + # Specific NGINX flags + --nginx-additional-configuration \ + | --nginx-external-configuration) + nginx_args+=("${1//nginx-/}" "${2:?missing value}") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + args_var="${web_server}_args[@]" + web_server_execute "$web_server" 
"ensure_${web_server}_app_configuration_exists" "${!args_var}" + done +} + +######################## +# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Returns: +# true if the configuration was disabled, false otherwise +######################## +ensure_web_server_app_configuration_not_exists() { + local app="${1:?missing app}" + local -a web_servers + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app" + done +} + +######################## +# Ensure the web server loads the configuration for an application in a URL prefix +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --allow-remote-connections - Whether to allow remote connections or to require local connections +# --document-root - Path to document root directory +# --prefix - URL prefix from where it will be accessible (i.e. 
/myapp) +# --type - Application type, which has an effect on what configuration template will be used +# Apache-specific flags: +# --apache-additional-configuration - Additional vhost configuration (no default) +# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no') +# --apache-extra-directory-configuration - Extra configuration for the document root directory +# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup +# NGINX-specific flags: +# --nginx-additional-configuration - Additional server block configuration (no default) +# Returns: +# true if the configuration was enabled, false otherwise +######################## +ensure_web_server_prefix_configuration_exists() { + local app="${1:?missing app}" + shift + local -a apache_args nginx_args web_servers args_var + apache_args=("$app") + nginx_args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --allow-remote-connections \ + | --document-root \ + | --prefix \ + | --type \ + ) + apache_args+=("$1" "${2:?missing value}") + nginx_args+=("$1" "${2:?missing value}") + shift + ;; + + # Specific Apache flags + --apache-additional-configuration \ + | --apache-allow-override \ + | --apache-extra-directory-configuration \ + | --apache-move-htaccess \ + ) + apache_args+=("${1//apache-/}" "$2") + shift + ;; + + # Specific NGINX flags + --nginx-additional-configuration) + nginx_args+=("${1//nginx-/}" "$2") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + args_var="${web_server}_args[@]" + web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}" + done +} + +######################## +# Ensure a web server application configuration is updated with the runtime configuration (i.e. 
ports) +# It serves as a wrapper for the specific web server function +# Globals: +# * +# Arguments: +# $1 - App name +# Flags: +# --hosts - Host listen addresses +# --server-name - Server name +# --server-aliases - Server aliases +# --enable-http - Enable HTTP app configuration (if not enabled already) +# --enable-https - Enable HTTPS app configuration (if not enabled already) +# --disable-http - Disable HTTP app configuration (if not disabled already) +# --disable-https - Disable HTTPS app configuration (if not disabled already) +# --http-port - HTTP port number +# --https-port - HTTPS port number +# Returns: +# true if the configuration was updated, false otherwise +######################## +web_server_update_app_configuration() { + local app="${1:?missing app}" + shift + local -a args web_servers + args=("$app") + # Validate arguments + while [[ "$#" -gt 0 ]]; do + case "$1" in + # Common flags + --enable-http \ + | --enable-https \ + | --disable-http \ + | --disable-https \ + ) + args+=("$1") + ;; + --hosts \ + | --server-name \ + | --server-aliases \ + | --http-port \ + | --https-port \ + ) + args+=("$1" "${2:?missing value}") + shift + ;; + + *) + echo "Invalid command line flag $1" >&2 + return 1 + ;; + esac + shift + done + read -r -a web_servers <<< "$(web_server_list)" + for web_server in "${web_servers[@]}"; do + web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}" + done +} + +######################## +# Enable loading page, which shows users that the initialization process is not yet completed +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_enable_loading_page() { + ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \ + --apache-additional-configuration " +# Show a HTTP 503 Service Unavailable page by default +RedirectMatch 503 ^/$ +# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes +ErrorDocument 
404 /index.html +ErrorDocument 503 /index.html" \ + --nginx-additional-configuration " +# Show a HTTP 503 Service Unavailable page by default +location / { + return 503; +} +# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes +error_page 404 @installing; +error_page 503 @installing; +location @installing { + rewrite ^(.*)$ /index.html break; +}" + web_server_reload +} + +######################## +# Disable loading page, which shows users that the initialization process is not yet completed +# Globals: +# * +# Arguments: +# None +# Returns: +# None +######################### +web_server_disable_install_page() { + ensure_web_server_app_configuration_not_exists "__loading" + web_server_reload +} diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/install_packages b/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/install_packages new file mode 100755 index 000000000000..acbc3173208c --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/install_packages @@ -0,0 +1,27 @@ +#!/bin/sh +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +set -eu + +n=0 +max=2 +export DEBIAN_FRONTEND=noninteractive + +until [ $n -gt $max ]; do + set +e + ( + apt-get update -qq && + apt-get install -y --no-install-recommends "$@" + ) + CODE=$? + set -e + if [ $CODE -eq 0 ]; then + break + fi + if [ $n -eq $max ]; then + exit $CODE + fi + echo "apt failed, retrying" + n=$(($n + 1)) +done +apt-get clean && rm -rf /var/lib/apt/lists /var/cache/apt/archives diff --git a/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/run-script b/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/run-script new file mode 100755 index 000000000000..4ca0f897277e --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/prebuildfs/usr/sbin/run-script @@ -0,0 +1,24 @@ +#!/bin/sh +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 +set -u + +if [ $# -eq 0 ]; then + >&2 echo "No arguments provided" + exit 1 +fi + +script=$1 +exit_code="${2:-96}" +fail_if_not_present="${3:-n}" + +if test -f "$script"; then + sh $script + + if [ $? -ne 0 ]; then + exit $((exit_code)) + fi +elif [ "$fail_if_not_present" = "y" ]; then + >&2 echo "script not found: $script" + exit 127 +fi diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch-env.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch-env.sh new file mode 100644 index 000000000000..145eb7455224 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch-env.sh @@ -0,0 +1,258 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Environment configuration for elasticsearch + +# The values for all environment variables will be set in the below order of precedence +# 1. Custom environment variables defined below after Bitnami defaults +# 2. Constants defined in this file (environment variables with no default), i.e. BITNAMI_ROOT_DIR +# 3. Environment variables overridden via external files using *_FILE variables (see below) +# 4. Environment variables set externally (i.e. current Bash context/Dockerfile/userdata) + +# Load logging library +# shellcheck disable=SC1090,SC1091 +. 
/opt/bitnami/scripts/liblog.sh + +export BITNAMI_ROOT_DIR="/opt/bitnami" +export BITNAMI_VOLUME_DIR="/bitnami" + +# Logging configuration +export MODULE="${MODULE:-elasticsearch}" +export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}" + +# By setting an environment variable matching *_FILE to a file path, the prefixed environment +# variable will be overridden with the value specified in that file +elasticsearch_env_vars=( + ELASTICSEARCH_CERTS_DIR + ELASTICSEARCH_DATA_DIR_LIST + ELASTICSEARCH_BIND_ADDRESS + ELASTICSEARCH_ADVERTISED_HOSTNAME + ELASTICSEARCH_CLUSTER_HOSTS + ELASTICSEARCH_CLUSTER_MASTER_HOSTS + ELASTICSEARCH_CLUSTER_NAME + ELASTICSEARCH_HEAP_SIZE + ELASTICSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE + ELASTICSEARCH_MAX_ALLOWED_MEMORY + ELASTICSEARCH_MAX_TIMEOUT + ELASTICSEARCH_LOCK_ALL_MEMORY + ELASTICSEARCH_DISABLE_JVM_HEAP_DUMP + ELASTICSEARCH_DISABLE_GC_LOGS + ELASTICSEARCH_IS_DEDICATED_NODE + ELASTICSEARCH_MINIMUM_MASTER_NODES + ELASTICSEARCH_NODE_NAME + ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH + ELASTICSEARCH_NODE_ROLES + ELASTICSEARCH_PLUGINS + ELASTICSEARCH_TRANSPORT_PORT_NUMBER + ELASTICSEARCH_HTTP_PORT_NUMBER + ELASTICSEARCH_ENABLE_SECURITY + ELASTICSEARCH_PASSWORD + ELASTICSEARCH_TLS_VERIFICATION_MODE + ELASTICSEARCH_TLS_USE_PEM + ELASTICSEARCH_KEYSTORE_PASSWORD + ELASTICSEARCH_TRUSTSTORE_PASSWORD + ELASTICSEARCH_KEY_PASSWORD + ELASTICSEARCH_KEYSTORE_LOCATION + ELASTICSEARCH_TRUSTSTORE_LOCATION + ELASTICSEARCH_NODE_CERT_LOCATION + ELASTICSEARCH_NODE_KEY_LOCATION + ELASTICSEARCH_CA_CERT_LOCATION + ELASTICSEARCH_SKIP_TRANSPORT_TLS + ELASTICSEARCH_TRANSPORT_TLS_USE_PEM + ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD + ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD + ELASTICSEARCH_TRANSPORT_TLS_KEY_PASSWORD + ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION + ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION + ELASTICSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION + ELASTICSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION + ELASTICSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION + 
ELASTICSEARCH_ENABLE_REST_TLS + ELASTICSEARCH_HTTP_TLS_USE_PEM + ELASTICSEARCH_HTTP_TLS_KEYSTORE_PASSWORD + ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD + ELASTICSEARCH_HTTP_TLS_KEY_PASSWORD + ELASTICSEARCH_HTTP_TLS_KEYSTORE_LOCATION + ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION + ELASTICSEARCH_HTTP_TLS_NODE_CERT_LOCATION + ELASTICSEARCH_HTTP_TLS_NODE_KEY_LOCATION + ELASTICSEARCH_HTTP_TLS_CA_CERT_LOCATION + ELASTICSEARCH_ENABLE_FIPS_MODE + ELASTICSEARCH_KEYS + ELASTICSEARCH_ACTION_DESTRUCTIVE_REQUIRES_NAME + DB_MINIMUM_MANAGER_NODES +) +for env_var in "${elasticsearch_env_vars[@]}"; do + file_env_var="${env_var}_FILE" + if [[ -n "${!file_env_var:-}" ]]; then + if [[ -r "${!file_env_var:-}" ]]; then + export "${env_var}=$(< "${!file_env_var}")" + unset "${file_env_var}" + else + warn "Skipping export of '${env_var}'. '${!file_env_var:-}' is not readable." + fi + fi +done +unset elasticsearch_env_vars +export DB_FLAVOR="elasticsearch" + +# Paths +export ELASTICSEARCH_VOLUME_DIR="/bitnami/elasticsearch" +export DB_VOLUME_DIR="$ELASTICSEARCH_VOLUME_DIR" +export ELASTICSEARCH_BASE_DIR="/opt/bitnami/elasticsearch" +export DB_BASE_DIR="$ELASTICSEARCH_BASE_DIR" +export ELASTICSEARCH_CONF_DIR="${DB_BASE_DIR}/config" +export DB_CONF_DIR="$ELASTICSEARCH_CONF_DIR" +export ELASTICSEARCH_DEFAULT_CONF_DIR="${DB_BASE_DIR}/config.default" +export DB_DEFAULT_CONF_DIR="$ELASTICSEARCH_DEFAULT_CONF_DIR" +export ELASTICSEARCH_CERTS_DIR="${ELASTICSEARCH_CERTS_DIR:-${DB_CONF_DIR}/certs}" +export DB_CERTS_DIR="$ELASTICSEARCH_CERTS_DIR" +export ELASTICSEARCH_LOGS_DIR="${DB_BASE_DIR}/logs" +export DB_LOGS_DIR="$ELASTICSEARCH_LOGS_DIR" +export ELASTICSEARCH_PLUGINS_DIR="${DB_BASE_DIR}/plugins" +export DB_PLUGINS_DIR="$ELASTICSEARCH_PLUGINS_DIR" +export ELASTICSEARCH_DEFAULT_PLUGINS_DIR="${DB_BASE_DIR}/plugins.default" +export DB_DEFAULT_PLUGINS_DIR="$ELASTICSEARCH_DEFAULT_PLUGINS_DIR" +export ELASTICSEARCH_DATA_DIR="${DB_VOLUME_DIR}/data" +export DB_DATA_DIR="$ELASTICSEARCH_DATA_DIR" +export 
ELASTICSEARCH_DATA_DIR_LIST="${ELASTICSEARCH_DATA_DIR_LIST:-}" +export DB_DATA_DIR_LIST="$ELASTICSEARCH_DATA_DIR_LIST" +export ELASTICSEARCH_TMP_DIR="${DB_BASE_DIR}/tmp" +export DB_TMP_DIR="$ELASTICSEARCH_TMP_DIR" +export ELASTICSEARCH_BIN_DIR="${DB_BASE_DIR}/bin" +export DB_BIN_DIR="$ELASTICSEARCH_BIN_DIR" +export ELASTICSEARCH_MOUNTED_PLUGINS_DIR="${DB_VOLUME_DIR}/plugins" +export DB_MOUNTED_PLUGINS_DIR="$ELASTICSEARCH_MOUNTED_PLUGINS_DIR" +export ELASTICSEARCH_CONF_FILE="${DB_CONF_DIR}/elasticsearch.yml" +export DB_CONF_FILE="$ELASTICSEARCH_CONF_FILE" +export ELASTICSEARCH_LOG_FILE="${DB_LOGS_DIR}/elasticsearch.log" +export DB_LOG_FILE="$ELASTICSEARCH_LOG_FILE" +export ELASTICSEARCH_PID_FILE="${DB_TMP_DIR}/elasticsearch.pid" +export DB_PID_FILE="$ELASTICSEARCH_PID_FILE" +export ELASTICSEARCH_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d" +export DB_INITSCRIPTS_DIR="$ELASTICSEARCH_INITSCRIPTS_DIR" +export PATH="${DB_BIN_DIR}:${BITNAMI_ROOT_DIR}/common/bin:$PATH" + +# System users (when running with a privileged user) +export ELASTICSEARCH_DAEMON_USER="elasticsearch" +export DB_DAEMON_USER="$ELASTICSEARCH_DAEMON_USER" +export ELASTICSEARCH_DAEMON_GROUP="elasticsearch" +export DB_DAEMON_GROUP="$ELASTICSEARCH_DAEMON_GROUP" + +# Elasticsearch configuration +export ELASTICSEARCH_BIND_ADDRESS="${ELASTICSEARCH_BIND_ADDRESS:-}" +export DB_BIND_ADDRESS="$ELASTICSEARCH_BIND_ADDRESS" +export ELASTICSEARCH_ADVERTISED_HOSTNAME="${ELASTICSEARCH_ADVERTISED_HOSTNAME:-}" +export DB_ADVERTISED_HOSTNAME="$ELASTICSEARCH_ADVERTISED_HOSTNAME" +export ELASTICSEARCH_CLUSTER_HOSTS="${ELASTICSEARCH_CLUSTER_HOSTS:-}" +export DB_CLUSTER_HOSTS="$ELASTICSEARCH_CLUSTER_HOSTS" +export ELASTICSEARCH_CLUSTER_MASTER_HOSTS="${ELASTICSEARCH_CLUSTER_MASTER_HOSTS:-}" +export DB_CLUSTER_MASTER_HOSTS="$ELASTICSEARCH_CLUSTER_MASTER_HOSTS" +export ELASTICSEARCH_CLUSTER_NAME="${ELASTICSEARCH_CLUSTER_NAME:-}" +export DB_CLUSTER_NAME="$ELASTICSEARCH_CLUSTER_NAME" +export 
ELASTICSEARCH_HEAP_SIZE="${ELASTICSEARCH_HEAP_SIZE:-1024m}" +export DB_HEAP_SIZE="$ELASTICSEARCH_HEAP_SIZE" +export ELASTICSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE="${ELASTICSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE:-100}" +export DB_MAX_ALLOWED_MEMORY_PERCENTAGE="$ELASTICSEARCH_MAX_ALLOWED_MEMORY_PERCENTAGE" +export ELASTICSEARCH_MAX_ALLOWED_MEMORY="${ELASTICSEARCH_MAX_ALLOWED_MEMORY:-}" +export DB_MAX_ALLOWED_MEMORY="$ELASTICSEARCH_MAX_ALLOWED_MEMORY" +export ELASTICSEARCH_MAX_TIMEOUT="${ELASTICSEARCH_MAX_TIMEOUT:-60}" +export DB_MAX_TIMEOUT="$ELASTICSEARCH_MAX_TIMEOUT" +export ELASTICSEARCH_LOCK_ALL_MEMORY="${ELASTICSEARCH_LOCK_ALL_MEMORY:-no}" +export DB_LOCK_ALL_MEMORY="$ELASTICSEARCH_LOCK_ALL_MEMORY" +export ELASTICSEARCH_DISABLE_JVM_HEAP_DUMP="${ELASTICSEARCH_DISABLE_JVM_HEAP_DUMP:-no}" +export DB_DISABLE_JVM_HEAP_DUMP="$ELASTICSEARCH_DISABLE_JVM_HEAP_DUMP" +export ELASTICSEARCH_DISABLE_GC_LOGS="${ELASTICSEARCH_DISABLE_GC_LOGS:-no}" +export DB_DISABLE_GC_LOGS="$ELASTICSEARCH_DISABLE_GC_LOGS" +export ELASTICSEARCH_IS_DEDICATED_NODE="${ELASTICSEARCH_IS_DEDICATED_NODE:-no}" +export DB_IS_DEDICATED_NODE="$ELASTICSEARCH_IS_DEDICATED_NODE" +ELASTICSEARCH_MINIMUM_MASTER_NODES="${ELASTICSEARCH_MINIMUM_MASTER_NODES:-"${DB_MINIMUM_MANAGER_NODES:-}"}" +export ELASTICSEARCH_MINIMUM_MASTER_NODES="${ELASTICSEARCH_MINIMUM_MASTER_NODES:-}" +export DB_MINIMUM_MASTER_NODES="$ELASTICSEARCH_MINIMUM_MASTER_NODES" +export ELASTICSEARCH_NODE_NAME="${ELASTICSEARCH_NODE_NAME:-}" +export DB_NODE_NAME="$ELASTICSEARCH_NODE_NAME" +export ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH="${ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH:-}" +export DB_FS_SNAPSHOT_REPO_PATH="$ELASTICSEARCH_FS_SNAPSHOT_REPO_PATH" +export ELASTICSEARCH_NODE_ROLES="${ELASTICSEARCH_NODE_ROLES:-}" +export DB_NODE_ROLES="$ELASTICSEARCH_NODE_ROLES" +export ELASTICSEARCH_PLUGINS="${ELASTICSEARCH_PLUGINS:-}" +export DB_PLUGINS="$ELASTICSEARCH_PLUGINS" +export ELASTICSEARCH_TRANSPORT_PORT_NUMBER="${ELASTICSEARCH_TRANSPORT_PORT_NUMBER:-9300}" 
+export DB_TRANSPORT_PORT_NUMBER="$ELASTICSEARCH_TRANSPORT_PORT_NUMBER" +export ELASTICSEARCH_HTTP_PORT_NUMBER="${ELASTICSEARCH_HTTP_PORT_NUMBER:-9200}" +export DB_HTTP_PORT_NUMBER="$ELASTICSEARCH_HTTP_PORT_NUMBER" + +# Elasticsearch Security configuration +export ELASTICSEARCH_ENABLE_SECURITY="${ELASTICSEARCH_ENABLE_SECURITY:-false}" +export DB_ENABLE_SECURITY="$ELASTICSEARCH_ENABLE_SECURITY" +export ELASTICSEARCH_PASSWORD="${ELASTICSEARCH_PASSWORD:-bitnami}" +export DB_PASSWORD="$ELASTICSEARCH_PASSWORD" +export ELASTICSEARCH_USERNAME="elastic" +export DB_USERNAME="$ELASTICSEARCH_USERNAME" +export ELASTICSEARCH_TLS_VERIFICATION_MODE="${ELASTICSEARCH_TLS_VERIFICATION_MODE:-full}" +export DB_TLS_VERIFICATION_MODE="$ELASTICSEARCH_TLS_VERIFICATION_MODE" +export ELASTICSEARCH_TLS_USE_PEM="${ELASTICSEARCH_TLS_USE_PEM:-false}" +export DB_TLS_USE_PEM="$ELASTICSEARCH_TLS_USE_PEM" +export ELASTICSEARCH_KEYSTORE_PASSWORD="${ELASTICSEARCH_KEYSTORE_PASSWORD:-}" +export DB_KEYSTORE_PASSWORD="$ELASTICSEARCH_KEYSTORE_PASSWORD" +export ELASTICSEARCH_TRUSTSTORE_PASSWORD="${ELASTICSEARCH_TRUSTSTORE_PASSWORD:-}" +export DB_TRUSTSTORE_PASSWORD="$ELASTICSEARCH_TRUSTSTORE_PASSWORD" +export ELASTICSEARCH_KEY_PASSWORD="${ELASTICSEARCH_KEY_PASSWORD:-}" +export DB_KEY_PASSWORD="$ELASTICSEARCH_KEY_PASSWORD" +export ELASTICSEARCH_KEYSTORE_LOCATION="${ELASTICSEARCH_KEYSTORE_LOCATION:-${DB_CERTS_DIR}/elasticsearch.keystore.jks}" +export DB_KEYSTORE_LOCATION="$ELASTICSEARCH_KEYSTORE_LOCATION" +export ELASTICSEARCH_TRUSTSTORE_LOCATION="${ELASTICSEARCH_TRUSTSTORE_LOCATION:-${DB_CERTS_DIR}/elasticsearch.truststore.jks}" +export DB_TRUSTSTORE_LOCATION="$ELASTICSEARCH_TRUSTSTORE_LOCATION" +export ELASTICSEARCH_NODE_CERT_LOCATION="${ELASTICSEARCH_NODE_CERT_LOCATION:-${DB_CERTS_DIR}/tls.crt}" +export DB_NODE_CERT_LOCATION="$ELASTICSEARCH_NODE_CERT_LOCATION" +export ELASTICSEARCH_NODE_KEY_LOCATION="${ELASTICSEARCH_NODE_KEY_LOCATION:-${DB_CERTS_DIR}/tls.key}" +export 
DB_NODE_KEY_LOCATION="$ELASTICSEARCH_NODE_KEY_LOCATION" +export ELASTICSEARCH_CA_CERT_LOCATION="${ELASTICSEARCH_CA_CERT_LOCATION:-${DB_CERTS_DIR}/ca.crt}" +export DB_CA_CERT_LOCATION="$ELASTICSEARCH_CA_CERT_LOCATION" +export ELASTICSEARCH_SKIP_TRANSPORT_TLS="${ELASTICSEARCH_SKIP_TRANSPORT_TLS:-false}" +export DB_SKIP_TRANSPORT_TLS="$ELASTICSEARCH_SKIP_TRANSPORT_TLS" +export ELASTICSEARCH_TRANSPORT_TLS_USE_PEM="${ELASTICSEARCH_TRANSPORT_TLS_USE_PEM:-$DB_TLS_USE_PEM}" +export DB_TRANSPORT_TLS_USE_PEM="$ELASTICSEARCH_TRANSPORT_TLS_USE_PEM" +export ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD="${ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD:-$DB_KEYSTORE_PASSWORD}" +export DB_TRANSPORT_TLS_KEYSTORE_PASSWORD="$ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_PASSWORD" +export ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD="${ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD:-$DB_TRUSTSTORE_PASSWORD}" +export DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD="$ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_PASSWORD" +export ELASTICSEARCH_TRANSPORT_TLS_KEY_PASSWORD="${ELASTICSEARCH_TRANSPORT_TLS_KEY_PASSWORD:-$DB_KEY_PASSWORD}" +export DB_TRANSPORT_TLS_KEY_PASSWORD="$ELASTICSEARCH_TRANSPORT_TLS_KEY_PASSWORD" +export ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION="${ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION:-$DB_KEYSTORE_LOCATION}" +export DB_TRANSPORT_TLS_KEYSTORE_LOCATION="$ELASTICSEARCH_TRANSPORT_TLS_KEYSTORE_LOCATION" +export ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION="${ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION:-$DB_TRUSTSTORE_LOCATION}" +export DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION="$ELASTICSEARCH_TRANSPORT_TLS_TRUSTSTORE_LOCATION" +export ELASTICSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION="${ELASTICSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION:-$DB_NODE_CERT_LOCATION}" +export DB_TRANSPORT_TLS_NODE_CERT_LOCATION="$ELASTICSEARCH_TRANSPORT_TLS_NODE_CERT_LOCATION" +export ELASTICSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION="${ELASTICSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION:-$DB_NODE_KEY_LOCATION}" +export 
DB_TRANSPORT_TLS_NODE_KEY_LOCATION="$ELASTICSEARCH_TRANSPORT_TLS_NODE_KEY_LOCATION" +export ELASTICSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION="${ELASTICSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION:-$DB_CA_CERT_LOCATION}" +export DB_TRANSPORT_TLS_CA_CERT_LOCATION="$ELASTICSEARCH_TRANSPORT_TLS_CA_CERT_LOCATION" +export ELASTICSEARCH_ENABLE_REST_TLS="${ELASTICSEARCH_ENABLE_REST_TLS:-true}" +export DB_ENABLE_REST_TLS="$ELASTICSEARCH_ENABLE_REST_TLS" +export ELASTICSEARCH_HTTP_TLS_USE_PEM="${ELASTICSEARCH_HTTP_TLS_USE_PEM:-$DB_TLS_USE_PEM}" +export DB_HTTP_TLS_USE_PEM="$ELASTICSEARCH_HTTP_TLS_USE_PEM" +export ELASTICSEARCH_HTTP_TLS_KEYSTORE_PASSWORD="${ELASTICSEARCH_HTTP_TLS_KEYSTORE_PASSWORD:-$DB_KEYSTORE_PASSWORD}" +export DB_HTTP_TLS_KEYSTORE_PASSWORD="$ELASTICSEARCH_HTTP_TLS_KEYSTORE_PASSWORD" +export ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD="${ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD:-$DB_TRUSTSTORE_PASSWORD}" +export DB_HTTP_TLS_TRUSTSTORE_PASSWORD="$ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_PASSWORD" +export ELASTICSEARCH_HTTP_TLS_KEY_PASSWORD="${ELASTICSEARCH_HTTP_TLS_KEY_PASSWORD:-$DB_KEY_PASSWORD}" +export DB_HTTP_TLS_KEY_PASSWORD="$ELASTICSEARCH_HTTP_TLS_KEY_PASSWORD" +export ELASTICSEARCH_HTTP_TLS_KEYSTORE_LOCATION="${ELASTICSEARCH_HTTP_TLS_KEYSTORE_LOCATION:-$DB_KEYSTORE_LOCATION}" +export DB_HTTP_TLS_KEYSTORE_LOCATION="$ELASTICSEARCH_HTTP_TLS_KEYSTORE_LOCATION" +export ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION="${ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION:-$DB_TRUSTSTORE_LOCATION}" +export DB_HTTP_TLS_TRUSTSTORE_LOCATION="$ELASTICSEARCH_HTTP_TLS_TRUSTSTORE_LOCATION" +export ELASTICSEARCH_HTTP_TLS_NODE_CERT_LOCATION="${ELASTICSEARCH_HTTP_TLS_NODE_CERT_LOCATION:-$DB_NODE_CERT_LOCATION}" +export DB_HTTP_TLS_NODE_CERT_LOCATION="$ELASTICSEARCH_HTTP_TLS_NODE_CERT_LOCATION" +export ELASTICSEARCH_HTTP_TLS_NODE_KEY_LOCATION="${ELASTICSEARCH_HTTP_TLS_NODE_KEY_LOCATION:-$DB_NODE_KEY_LOCATION}" +export DB_HTTP_TLS_NODE_KEY_LOCATION="$ELASTICSEARCH_HTTP_TLS_NODE_KEY_LOCATION" +export 
ELASTICSEARCH_HTTP_TLS_CA_CERT_LOCATION="${ELASTICSEARCH_HTTP_TLS_CA_CERT_LOCATION:-$DB_CA_CERT_LOCATION}" +export DB_HTTP_TLS_CA_CERT_LOCATION="$ELASTICSEARCH_HTTP_TLS_CA_CERT_LOCATION" +export ELASTICSEARCH_ENABLE_FIPS_MODE="${ELASTICSEARCH_ENABLE_FIPS_MODE:-false}" +export ELASTICSEARCH_KEYS="${ELASTICSEARCH_KEYS:-}" +export ELASTICSEARCH_ACTION_DESTRUCTIVE_REQUIRES_NAME="${ELASTICSEARCH_ACTION_DESTRUCTIVE_REQUIRES_NAME:-}" +export DB_ACTION_DESTRUCTIVE_REQUIRES_NAME="$ELASTICSEARCH_ACTION_DESTRUCTIVE_REQUIRES_NAME" + +# Custom environment variables may be defined below diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/entrypoint.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/entrypoint.sh new file mode 100755 index 000000000000..0261f5a3c97d --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/entrypoint.sh @@ -0,0 +1,47 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libbitnami.sh +. /opt/bitnami/scripts/libelasticsearch.sh + +# Load environment +. /opt/bitnami/scripts/elasticsearch-env.sh + +print_welcome_page + +# We add the copy from default config in the entrypoint to not break users +# bypassing the setup.sh logic. If the file already exists do not overwrite (in +# case someone mounts a configuration file in /opt/bitnami/elasticsearch/conf) +debug "Copying files from $DB_DEFAULT_CONF_DIR to $DB_CONF_DIR" +cp -nr "$DB_DEFAULT_CONF_DIR"/. "$DB_CONF_DIR" + +if ! 
is_dir_empty "$DB_DEFAULT_PLUGINS_DIR"; then + debug "Copying plugins from $DB_DEFAULT_PLUGINS_DIR to $DB_PLUGINS_DIR" + # Copy the plugins installed by default to the plugins directory + # If there is already a plugin with the same name in the plugins folder do nothing + for plugin_path in "${DB_DEFAULT_PLUGINS_DIR}"/*; do + plugin_name="$(basename "$plugin_path")" + plugin_moved_path="${DB_PLUGINS_DIR}/${plugin_name}" + if ! [[ -d "$plugin_moved_path" ]]; then + cp -r "$plugin_path" "$plugin_moved_path" + fi + done +fi + +if [[ "$1" = "/opt/bitnami/scripts/elasticsearch/run.sh" ]]; then + info "** Starting Elasticsearch setup **" + /opt/bitnami/scripts/elasticsearch/setup.sh + info "** Elasticsearch setup finished! **" +fi + +echo "" +exec "$@" diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/healthcheck.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/healthcheck.sh new file mode 100755 index 000000000000..e1e213dd0306 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/healthcheck.sh @@ -0,0 +1,18 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. /opt/bitnami/scripts/libelasticsearch.sh + +# Load Elasticsearch environment variables +. /opt/bitnami/scripts/elasticsearch-env.sh + +elasticsearch_healthcheck diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/postunpack.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/postunpack.sh new file mode 100755 index 000000000000..8dd410773c92 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/postunpack.sh @@ -0,0 +1,48 @@ +#!/bin/bash +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +# Load libraries +. /opt/bitnami/scripts/libelasticsearch.sh +. /opt/bitnami/scripts/libfs.sh + +# Load environment +. /opt/bitnami/scripts/elasticsearch-env.sh + +for dir in "$DB_TMP_DIR" "$DB_DATA_DIR" "$DB_LOGS_DIR" "${DB_BASE_DIR}/plugins" "${DB_BASE_DIR}/modules" "$DB_CONF_DIR" "$DB_VOLUME_DIR" "$DB_INITSCRIPTS_DIR" "$DB_MOUNTED_PLUGINS_DIR" "$DB_DEFAULT_CONF_DIR" "$DB_DEFAULT_PLUGINS_DIR"; do + ensure_dir_exists "$dir" + chmod -R ug+rwX "$dir" +done + +elasticsearch_configure_logging + +for dir in "$DB_TMP_DIR" "$DB_DATA_DIR" "$DB_LOGS_DIR" "${DB_BASE_DIR}/plugins" "${DB_BASE_DIR}/modules" "$DB_CONF_DIR" "$DB_VOLUME_DIR" "$DB_INITSCRIPTS_DIR" "$DB_MOUNTED_PLUGINS_DIR" "$DB_DEFAULT_CONF_DIR" "$DB_DEFAULT_PLUGINS_DIR"; do + # `elasticsearch-plugin install` command complains about being unable to create a plugin's directory + # even when having the proper permissions. + # The reason: the code tries to check the permissions by consulting the parent directory owner, + # instead of checking if the ES user actually has writing permissions. + # + # As a workaround, we will ensure the container works (at least) with the non-root user 1001. However, + # until we can avoid this hack, we can't guarantee this container to work on K8s distributions + # where containers are executed with non-privileged users with random user IDs. + # + # Issue reported at: https://github.com/bitnami/bitnami-docker-elasticsearch/issues/50 + chown -R 1001:0 "$dir" +done + +elasticsearch_install_plugins + +# Copy all initially generated configuration files to the default directory +# (this is to avoid breaking when entrypoint is being overridden) +cp -r "${DB_CONF_DIR}/"* "$DB_DEFAULT_CONF_DIR" + +if ! is_dir_empty "$DB_PLUGINS_DIR"; then + # Move all initially installed plugins to the default plugins directory. 
+ for plugin_path in "${DB_PLUGINS_DIR}"/*; do + plugin_name="$(basename "$plugin_path")" + plugin_moved_path="${DB_DEFAULT_PLUGINS_DIR}/${plugin_name}" + mv "$plugin_path" "$plugin_moved_path" + done +fi diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/run.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/run.sh new file mode 100755 index 000000000000..531b4cc42f07 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/run.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libelasticsearch.sh +. /opt/bitnami/scripts/libos.sh + +# Load environment +. /opt/bitnami/scripts/elasticsearch-env.sh + +# Constants +EXEC=$(command -v elasticsearch) +ARGS=("-p" "$DB_PID_FILE") +[[ -z "${DB_EXTRA_FLAGS:-}" ]] || ARGS=("${ARGS[@]}" "${DB_EXTRA_FLAGS[@]}") +# JAVA_HOME to be deprecated, see warning: +# warning: usage of JAVA_HOME is deprecated, use ES_JAVA_HOME +export JAVA_HOME=/opt/bitnami/java +export ES_JAVA_HOME=/opt/bitnami/java + +ARGS+=("$@") + +info "** Starting Elasticsearch **" +if am_i_root; then + exec_as_user "$DB_DAEMON_USER" "$EXEC" "${ARGS[@]}" +else + exec "$EXEC" "${ARGS[@]}" +fi diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/setup.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/setup.sh new file mode 100755 index 000000000000..cdea7cf9f84c --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/elasticsearch/setup.sh @@ -0,0 +1,35 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +#set -o xtrace + +# Load libraries +. /opt/bitnami/scripts/libos.sh +. 
/opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libelasticsearch.sh + +# Load environment +. /opt/bitnami/scripts/elasticsearch-env.sh + +# Ensure Elasticsearch environment variables settings are valid +elasticsearch_validate +# Ensure Elasticsearch is stopped when this script ends +trap "elasticsearch_stop" EXIT +# Ensure 'daemon' user exists when running as 'root' +am_i_root && ensure_user_exists "$DB_DAEMON_USER" --group "$DB_DAEMON_GROUP" +# Ensure Elasticsearch is initialized +elasticsearch_initialize +# Ensure kernel settings are valid +elasticsearch_validate_kernel +# Install Elasticsearch plugins +elasticsearch_install_plugins +# Ensure custom initialization scripts are executed +elasticsearch_custom_init_scripts +# Ensure all the required keys are added after plugins are installed +elasticsearch_set_keys diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/entrypoint.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/entrypoint.sh new file mode 100755 index 000000000000..c3a1e2383fa1 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/entrypoint.sh @@ -0,0 +1,19 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. /opt/bitnami/scripts/libbitnami.sh +. /opt/bitnami/scripts/liblog.sh + +print_welcome_page + +echo "" +exec "$@" diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/postunpack.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/postunpack.sh new file mode 100755 index 000000000000..52dbf4f13673 --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/java/postunpack.sh @@ -0,0 +1,26 @@ +#!/bin/bash +# Copyright VMware, Inc. 
+# SPDX-License-Identifier: APACHE-2.0 + +# shellcheck disable=SC1091 + +set -o errexit +set -o nounset +set -o pipefail +# set -o xtrace # Uncomment this line for debugging purposes + +# Load libraries +. /opt/bitnami/scripts/libfile.sh +. /opt/bitnami/scripts/liblog.sh + +# +# Java post-unpack operations +# + +# Override default files in the Java security directory. This is used for +# custom base images (with custom CA certificates or block lists is used) + +if [[ -n "${JAVA_EXTRA_SECURITY_DIR:-}" ]] && ! is_dir_empty "$JAVA_EXTRA_SECURITY_DIR"; then + info "Adding custom CAs to the Java security folder" + cp -Lr "${JAVA_EXTRA_SECURITY_DIR}/." /opt/bitnami/java/lib/security +fi diff --git a/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/libelasticsearch.sh b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/libelasticsearch.sh new file mode 100644 index 000000000000..4713bb5da79b --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/rootfs/opt/bitnami/scripts/libelasticsearch.sh @@ -0,0 +1,929 @@ +#!/bin/bash +# Copyright VMware, Inc. +# SPDX-License-Identifier: APACHE-2.0 +# +# Bitnami Elasticsearch library + +# shellcheck disable=SC1090,SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libfile.sh +. /opt/bitnami/scripts/liblog.sh +. /opt/bitnami/scripts/libnet.sh +. /opt/bitnami/scripts/libos.sh +. /opt/bitnami/scripts/libversion.sh +. /opt/bitnami/scripts/libservice.sh +. /opt/bitnami/scripts/libvalidations.sh + +# Functions + +######################## +# Configure TLS settings +# Globals: +# ELASTICSEARCH_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_transport_tls_configuration() { + info "Configuring Elasticsearch Transport TLS settings..." 
+ elasticsearch_conf_set xpack.security.transport.ssl.enabled "true" + elasticsearch_conf_set xpack.security.transport.ssl.verification_mode "$DB_TLS_VERIFICATION_MODE" + + if is_boolean_yes "$DB_TRANSPORT_TLS_USE_PEM"; then + debug "Configuring Transport Layer TLS settings using PEM certificates..." + ! is_empty_value "$DB_TRANSPORT_TLS_KEY_PASSWORD" && elasticsearch_set_key_value "xpack.security.transport.ssl.secure_key_passphrase" "$DB_TRANSPORT_TLS_KEY_PASSWORD" + elasticsearch_conf_set xpack.security.transport.ssl.key "$DB_TRANSPORT_TLS_NODE_KEY_LOCATION" + elasticsearch_conf_set xpack.security.transport.ssl.certificate "$DB_TRANSPORT_TLS_NODE_CERT_LOCATION" + elasticsearch_conf_set xpack.security.transport.ssl.certificate_authorities "$DB_TRANSPORT_TLS_CA_CERT_LOCATION" + else + debug "Configuring Transport Layer TLS settings using JKS/PKCS certificates..." + ! is_empty_value "$DB_TRANSPORT_TLS_KEYSTORE_PASSWORD" && elasticsearch_set_key_value "xpack.security.transport.ssl.keystore.secure_password" "$DB_TRANSPORT_TLS_KEYSTORE_PASSWORD" + ! is_empty_value "$DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD" && elasticsearch_set_key_value "xpack.security.transport.ssl.truststore.secure_password" "$DB_TRANSPORT_TLS_TRUSTSTORE_PASSWORD" + elasticsearch_conf_set xpack.security.transport.ssl.keystore.path "$DB_TRANSPORT_TLS_KEYSTORE_LOCATION" + elasticsearch_conf_set xpack.security.transport.ssl.truststore.path "$DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION" + fi +} + +######################## +# Configure TLS settings +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_http_tls_configuration() { + info "Configuring Elasticsearch HTTP TLS settings..." + elasticsearch_conf_set xpack.security.http.ssl.enabled "true" + if is_boolean_yes "$DB_HTTP_TLS_USE_PEM"; then + debug "Configuring REST API TLS settings using PEM certificates..." + ! 
is_empty_value "$DB_HTTP_TLS_KEY_PASSWORD" && elasticsearch_set_key_value "xpack.security.http.ssl.secure_key_passphrase" "$DB_HTTP_TLS_KEY_PASSWORD" + elasticsearch_conf_set xpack.security.http.ssl.key "$DB_HTTP_TLS_NODE_KEY_LOCATION" + elasticsearch_conf_set xpack.security.http.ssl.certificate "$DB_HTTP_TLS_NODE_CERT_LOCATION" + elasticsearch_conf_set xpack.security.http.ssl.certificate_authorities "$DB_HTTP_TLS_CA_CERT_LOCATION" + else + debug "Configuring REST API TLS settings using JKS/PKCS certificates..." + ! is_empty_value "$DB_HTTP_TLS_KEYSTORE_PASSWORD" && elasticsearch_set_key_value "xpack.security.http.ssl.keystore.secure_password" "$DB_HTTP_TLS_KEYSTORE_PASSWORD" + ! is_empty_value "$DB_HTTP_TLS_TRUSTSTORE_PASSWORD" && elasticsearch_set_key_value "xpack.security.http.ssl.truststore.secure_password" "$DB_HTTP_TLS_TRUSTSTORE_PASSWORD" + elasticsearch_conf_set xpack.security.http.ssl.keystore.path "$DB_HTTP_TLS_KEYSTORE_LOCATION" + elasticsearch_conf_set xpack.security.http.ssl.truststore.path "$DB_HTTP_TLS_TRUSTSTORE_LOCATION" + fi +} + +######################## +# Migrate old Elasticsearch data +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +migrate_old_data() { + warn "Persisted data follows old structure. Migrating to new one..." + warn "Custom configuration files won't be persisted any longer!" 
+ local old_data_dir="${DB_DATA_DIR}/elasticsearch" + local old_custom_conf_file="${old_data_dir}/conf/elasticsearch_custom.yml" + local custom_conf_file="${DB_CONF_DIR}/elasticsearch_custom.yml" + if [[ -f "$old_custom_conf_file" ]]; then + debug "Adding old custom configuration to user configuration" + echo "" >>"$custom_conf_file" + cat "$old_custom_conf_file" >>"$custom_conf_file" + fi + debug "Adapting data to new file structure" + find "${old_data_dir}/data" -maxdepth 1 -mindepth 1 -exec mv {} "$DB_DATA_DIR" \; + debug "Removing data that is not persisted anymore from persisted directory" + rm -rf "$old_data_dir" "${DB_DATA_DIR}/java" +} + +######################## +# Set Elasticsearch keystore values +# Globals: +# ELASTICSEARCH_KEYS +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_set_keys() { + read -r -a keys_list <<<"$(tr ',;' ' ' <<<"$ELASTICSEARCH_KEYS")" + if [[ "${#keys_list[@]}" -gt 0 ]]; then + for key_value in "${keys_list[@]}"; do + read -r -a key_value <<<"$(tr '=' ' ' <<<"$key_value")" + local key="${key_value[0]}" + local value="${key_value[1]}" + + elasticsearch_set_key_value "$key" "$value" + done + fi +} + +######################## +# Set Elasticsearch keystore values +# Globals: +# ELASTICSEARCH_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_set_key_value() { + local key="${1:?missing key}" + local value="${2:?missing value}" + + debug "Storing key: ${key}" + elasticsearch-keystore add --stdin --force "$key" <<<"$value" + + am_i_root && chown "$DB_DAEMON_USER:$DB_DAEMON_GROUP" "${DB_CONF_DIR}/elasticsearch.keystore" + # Avoid exit code of previous commands to affect the result of this function + true +} + +#!/bin/bash +# +# Bitnami Elasticsearch/Opensearch common library + +# shellcheck disable=SC1090,SC1091 + +# Load Generic Libraries +. /opt/bitnami/scripts/libfs.sh +. /opt/bitnami/scripts/libfile.sh +. /opt/bitnami/scripts/liblog.sh +. 
/opt/bitnami/scripts/libnet.sh +. /opt/bitnami/scripts/libos.sh +. /opt/bitnami/scripts/libversion.sh +. /opt/bitnami/scripts/libservice.sh +. /opt/bitnami/scripts/libvalidations.sh + +# Functions + +######################## +# Write a configuration setting value +# Globals: +# DB_CONF_FILE +# Arguments: +# $1 - key +# $2 - value +# $3 - YAML type (string, int or bool) +# Returns: +# None +######################### +elasticsearch_conf_write() { + local -r key="${1:?Missing key}" + local -r value="${2:-}" + local -r type="${3:-string}" + local -r tempfile=$(mktemp) + + case "$type" in + string) + yq eval "(.${key}) |= \"${value}\"" "$DB_CONF_FILE" >"$tempfile" + ;; + int) + yq eval "(.${key}) |= ${value}" "$DB_CONF_FILE" >"$tempfile" + ;; + bool) + yq eval "(.${key}) |= (\"${value}\" | test(\"true\"))" "$DB_CONF_FILE" >"$tempfile" + ;; + *) + error "Type unknown: ${type}" + return 1 + ;; + esac + cp "$tempfile" "$DB_CONF_FILE" +} + +######################## +# Set a configuration setting value +# Globals: +# DB_CONF_FILE +# Arguments: +# $1 - key +# $2 - values (array) +# Returns: +# None +######################### +elasticsearch_conf_set() { + local key="${1:?missing key}" + shift + local values=("${@}") + + if [[ "${#values[@]}" -eq 0 ]]; then + stderr_print "$key" + stderr_print "missing values" + return 1 + elif [[ "${#values[@]}" -eq 1 ]] && [[ -n "${values[0]}" ]]; then + elasticsearch_conf_write "$key" "${values[0]}" + else + for i in "${!values[@]}"; do + if [[ -n "${values[$i]}" ]]; then + elasticsearch_conf_write "${key}[$i]" "${values[$i]}" + fi + done + fi +} + +######################## +# Check if Elasticsearch is running +# Globals: +# DB_TMP_DIR +# Arguments: +# None +# Returns: +# Boolean +######################### +is_elasticsearch_running() { + local pid + pid="$(get_pid_from_file "$DB_PID_FILE")" + + if [[ -z "$pid" ]]; then + false + else + is_service_running "$pid" + fi +} + +######################## +# Check if Elasticsearch is not running +# 
Globals: +# DB_TMP_DIR +# Arguments: +# None +# Returns: +# Boolean +######################### +is_elasticsearch_not_running() { + ! is_elasticsearch_running + return "$?" +} + +######################## +# Stop Elasticsearch +# Globals: +# DB_TMP_DIR +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_stop() { + ! is_elasticsearch_running && return + debug "Stopping ${DB_FLAVOR^}..." + stop_service_using_pid "$DB_PID_FILE" +} + +######################## +# Start Elasticsearch and wait until it's ready +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_start() { + is_elasticsearch_running && return + + debug "Starting ${DB_FLAVOR^}..." + local command=("${DB_BASE_DIR}/bin/${DB_FLAVOR}" "-d" "-p" "$DB_PID_FILE") + am_i_root && command=("run_as_user" "$DB_DAEMON_USER" "${command[@]}") + if [[ "$BITNAMI_DEBUG" = true ]]; then + "${command[@]}" & + else + "${command[@]}" >/dev/null 2>&1 & + fi + + local retries=50 + local seconds=2 + # Check the process is running + retry_while "is_elasticsearch_running" "$retries" "$seconds" + # Check Elasticsearch API is reachable + retry_while "elasticsearch_healthcheck" "$retries" "$seconds" +} + +######################## +# Validate kernel settings +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_validate_kernel() { + # Auxiliary functions + validate_sysctl_key() { + local key="${1:?key is missing}" + local value="${2:?value is missing}" + local current_value + current_value="$(sysctl -n "$key")" + if [[ "$current_value" -lt "$value" ]]; then + error "Invalid kernel settings. ${DB_FLAVOR^} requires at least: $key = $value" + exit 1 + fi + } + + debug "Validating Kernel settings..." + if [[ $(yq eval .index.store.type "$DB_CONF_FILE") ]]; then + debug "Custom index.store.type found in the config file. Skipping kernel validation..." 
+ else + validate_sysctl_key "fs.file-max" 65536 + fi + if [[ $(yq eval .node.store.allow_mmap "$DB_CONF_FILE") ]]; then + debug "Custom node.store.allow_mmap found in the config file. Skipping kernel validation..." + else + validate_sysctl_key "vm.max_map_count" 262144 + fi +} + +######################## +# Validate settings in DB_* env vars +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_validate() { + local error_code=0 + + # Auxiliary functions + print_validation_error() { + error "$1" + error_code=1 + } + + validate_node_roles() { + if [ -n "$DB_NODE_ROLES" ]; then + read -r -a roles_list <<<"$(get_elasticsearch_roles)" + local master_role="master" + [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. ]] && master_role="cluster_manager" + if [[ "${#roles_list[@]}" -le 0 ]]; then + warn "Setting ${DB_FLAVOR^^}_NODE_ROLES is empty and ${DB_FLAVOR^^}_IS_DEDICATED_NODE is set to true, ${DB_FLAVOR^} will be configured as coordinating-only node." + fi + for role in "${roles_list[@]}"; do + case "$role" in + "$master_role" | data | data_content | data_hot | data_warm | data_cold | data_frozen | ingest | ml | remote_cluster_client | transform) ;; + + *) + print_validation_error "Invalid node role '$role'. Supported roles are '${master_role},data,data_content,data_hot,data_warm,data_cold,data_frozen,ingest,ml,remote_cluster_client,transform'" + ;; + esac + done + fi + } + + debug "Ensuring expected directories/files exist..." + am_i_root && ensure_user_exists "$DB_DAEMON_USER" --group "$DB_DAEMON_GROUP" + for dir in "$DB_TMP_DIR" "$DB_LOGS_DIR" "$DB_PLUGINS_DIR" "$DB_BASE_DIR/modules" "$DB_CONF_DIR"; do + ensure_dir_exists "$dir" + am_i_root && chown -R "$DB_DAEMON_USER:$DB_DAEMON_GROUP" "$dir" + done + + debug "Validating settings in DB_* env vars..." + for var in "DB_HTTP_PORT_NUMBER" "DB_TRANSPORT_PORT_NUMBER"; do + if ! 
err=$(validate_port "${!var}"); then + print_validation_error "An invalid port was specified in the environment variable $var: $err" + fi + done + + if ! is_boolean_yes "$DB_IS_DEDICATED_NODE"; then + warn "Setting ${DB_FLAVOR^^}_IS_DEDICATED_NODE is disabled." + warn "${DB_FLAVOR^^}_NODE_ROLES will be ignored and ${DB_FLAVOR^} will asume all different roles." + else + validate_node_roles + fi + + if [[ -n "$DB_BIND_ADDRESS" ]] && ! validate_ipv4 "$DB_BIND_ADDRESS"; then + print_validation_error "The Bind Address specified in the environment variable ${DB_FLAVOR^^}_BIND_ADDRESS is not a valid IPv4" + fi + + if is_boolean_yes "$DB_ENABLE_SECURITY"; then + if [[ "$DB_FLAVOR" = "opensearch" ]]; then + if [[ ! -f "$OPENSEARCH_SECURITY_ADMIN_KEY_LOCATION" ]] || [[ ! -f "$OPENSEARCH_SECURITY_ADMIN_CERT_LOCATION" ]]; then + print_validation_error "In order to enable Opensearch Security, you must provide a valid admin PEM key and certificate." + fi + if is_empty_value "$OPENSEARCH_SECURITY_NODES_DN"; then + print_validation_error "The variable OPENSEARCH_SECURITY_NODES_DN is required." + fi + if is_empty_value "$OPENSEARCH_SECURITY_ADMIN_DN"; then + print_validation_error "The variable OPENSEARCH_SECURITY_ADMIN_DN is required." + fi + if ! is_boolean_yes "$OPENSEARCH_ENABLE_REST_TLS"; then + print_validation_error "Opensearch does not support plaintext conections (HTTP) when Security is enabled." + fi + fi + if ! is_boolean_yes "$DB_SKIP_TRANSPORT_TLS"; then + if is_boolean_yes "$DB_TRANSPORT_TLS_USE_PEM"; then + if [[ ! -f "$DB_TRANSPORT_TLS_NODE_CERT_LOCATION" ]] || [[ ! -f "$DB_TRANSPORT_TLS_NODE_KEY_LOCATION" ]] || [[ ! -f "$DB_TRANSPORT_TLS_CA_CERT_LOCATION" ]]; then + print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} Transport you must provide your node key, certificate and a valid certification_authority certificate." + fi + elif [[ ! -f "$DB_TRANSPORT_TLS_KEYSTORE_LOCATION" ]] || [[ ! 
-f "$DB_TRANSPORT_TLS_TRUSTSTORE_LOCATION" ]]; then + print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} Transport with JKS/PKCS12 certs you must mount a valid keystore and truststore." + fi + fi + if is_boolean_yes "$DB_HTTP_TLS_USE_PEM"; then + if is_boolean_yes "$DB_HTTP_TLS_USE_PEM"; then + if [[ ! -f "$DB_HTTP_TLS_NODE_CERT_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_NODE_KEY_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_CA_CERT_LOCATION" ]]; then + print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} you must provide your node key, certificate and a valid certification_authority certificate." + fi + elif [[ ! -f "$DB_HTTP_TLS_KEYSTORE_LOCATION" ]] || [[ ! -f "$DB_HTTP_TLS_TRUSTSTORE_LOCATION" ]]; then + print_validation_error "In order to configure the TLS encryption for ${DB_FLAVOR^} with JKS/PKCS12 certs you must mount a valid keystore and truststore." + fi + fi + fi + + [[ "$error_code" -eq 0 ]] || exit "$error_code" +} + +######################## +# Determine the hostname by which Elasticsearch can be contacted +# Returns: +# The value of $DB_ADVERTISED_HOSTNAME or the current host address +######################## +get_elasticsearch_hostname() { + if [[ -n "$DB_ADVERTISED_HOSTNAME" ]]; then + echo "$DB_ADVERTISED_HOSTNAME" + else + get_machine_ip + fi +} + +######################## +# Evaluates the env variable DB_NODE_ROLES and replaces master with +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# Array of node roles +######################### +get_elasticsearch_roles() { + read -r -a roles_list_tmp <<<"$(tr ',;' ' ' <<<"$DB_NODE_ROLES")" + roles_list=("${roles_list_tmp[@]}") + for i in "${!roles_list[@]}"; do + if [[ ${roles_list[$i]} == "master" ]] && [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. 
]]; then + roles_list[i]="cluster_manager" + fi + done + echo "${roles_list[@]}" +} + +######################## +# Configure cluster settings +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_cluster_configuration() { + # Auxiliary functions + bind_address() { + if [[ -n "$DB_BIND_ADDRESS" ]]; then + echo "$DB_BIND_ADDRESS" + else + echo "0.0.0.0" + fi + } + + is_node_master() { + if is_boolean_yes "$DB_IS_DEDICATED_NODE"; then + if [ -n "$DB_NODE_ROLES" ]; then + read -r -a roles_list <<<"$(get_elasticsearch_roles)" + if [[ " ${roles_list[*]} " = *" master "* ]]; then + true + elif [[ "$DB_FLAVOR" = "opensearch" && " ${roles_list[*]} " = *" cluster_manager "* ]]; then + true + else + false + fi + else + false + fi + else + true + fi + } + + info "Configuring ${DB_FLAVOR^} cluster settings..." + elasticsearch_conf_set network.host "$(get_elasticsearch_hostname)" + elasticsearch_conf_set network.publish_host "$(get_elasticsearch_hostname)" + elasticsearch_conf_set network.bind_host "$(bind_address)" + elasticsearch_conf_set cluster.name "$DB_CLUSTER_NAME" + elasticsearch_conf_set node.name "${DB_NODE_NAME:-$(hostname)}" + + if [[ -n "$DB_CLUSTER_HOSTS" ]]; then + read -r -a host_list <<<"$(tr ',;' ' ' <<<"$DB_CLUSTER_HOSTS")" + master_list=("${host_list[@]}") + if [[ -n "$DB_CLUSTER_MASTER_HOSTS" ]]; then + read -r -a master_list <<<"$(tr ',;' ' ' <<<"$DB_CLUSTER_MASTER_HOSTS")" + fi + elasticsearch_conf_set discovery.seed_hosts "${host_list[@]}" + if is_node_master; then + if [[ "$DB_FLAVOR" = "opensearch" && "$APP_VERSION" =~ ^2\. 
]]; then + elasticsearch_conf_set cluster.initial_cluster_manager_nodes "${master_list[@]}" + else + elasticsearch_conf_set cluster.initial_master_nodes "${master_list[@]}" + fi + fi + elasticsearch_conf_set discovery.initial_state_timeout "10m" + else + elasticsearch_conf_set "discovery.type" "single-node" + fi +} + +######################## +# Extend cluster settings with custom, user-provided config +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_custom_configuration() { + local custom_conf_file="${DB_CONF_DIR}/my_${DB_FLAVOR}.yml" + local -r tempfile=$(mktemp) + [[ ! -s "$custom_conf_file" ]] && return + info "Adding custom configuration" + yq eval-all 'select(fileIndex == 0) * select(fileIndex == 1)' "$DB_CONF_FILE" "$custom_conf_file" >"$tempfile" + cp "$tempfile" "$DB_CONF_FILE" +} + +######################## +# Configure node roles. +# There are 3 scenarios: +# * If DB_IS_DEDICATED_NODE is disabled, 'node.roles' is omitted and assumes all the roles (check docs). +# * Otherwise, 'node.roles' with a list of roles provided with DB_NODE_ROLES. +# * In addition, if DB_NODE_ROLES is empty, node.roles will be configured empty, meaning that the role is 'coordinating-only'. +# +# Docs ref: https://www.elastic.co/guide/en/opensearch/reference/current/modules-node.html +# +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_configure_node_roles() { + debug "Configure ${DB_FLAVOR^} Node roles..." 
+ + local set_repo_path="no" + if is_boolean_yes "$DB_IS_DEDICATED_NODE"; then + read -r -a roles_list <<<"$(get_elasticsearch_roles)" + if [[ "${#roles_list[@]}" -eq 0 ]]; then + elasticsearch_conf_write node.roles "[]" int + else + elasticsearch_conf_set node.roles "${roles_list[@]}" + for role in "${roles_list[@]}"; do + case "$role" in + cluster_manager | master | data | data_content | data_hot | data_warm | data_cold | data_frozen) + set_repo_path="yes" + ;; + *) ;; + esac + done + fi + else + set_repo_path="yes" + fi + + if is_boolean_yes "$set_repo_path" && [[ -n "$DB_FS_SNAPSHOT_REPO_PATH" ]]; then + # Configure path.repo to restore snapshots from system repository + # It must be set on every cluster_manager an data node + # ref: https://www.elastic.co/guide/en/opensearch/reference/current/snapshots-register-repository.html#snapshots-filesystem-repository + elasticsearch_conf_set path.repo "$DB_FS_SNAPSHOT_REPO_PATH" + fi +} + +######################## +# Configure Heap Size +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_set_heap_size() { + local heap_size + + # Remove heap.options if it already exists + rm -f "${DB_CONF_DIR}/jvm.options.d/heap.options" + + if [[ -n "$DB_HEAP_SIZE" ]]; then + debug "Using specified values for Xmx and Xms heap options..." + heap_size="$DB_HEAP_SIZE" + else + debug "Calculating appropriate Xmx and Xms values..." 
+ local machine_mem="" + machine_mem="$(get_total_memory)" + if [[ "$machine_mem" -lt 65536 ]]; then + local max_allowed_memory + local calculated_heap_size + calculated_heap_size="$((machine_mem / 2))" + max_allowed_memory="$((DB_MAX_ALLOWED_MEMORY_PERCENTAGE * machine_mem))" + max_allowed_memory="$((max_allowed_memory / 100))" + # Allow for absolute memory limit when calculating limit from percentage + if [[ -n "$DB_MAX_ALLOWED_MEMORY" && "$max_allowed_memory" -gt "$DB_MAX_ALLOWED_MEMORY" ]]; then + max_allowed_memory="$DB_MAX_ALLOWED_MEMORY" + fi + if [[ "$calculated_heap_size" -gt "$max_allowed_memory" ]]; then + info "Calculated Java heap size of ${calculated_heap_size} will be limited to ${max_allowed_memory}" + calculated_heap_size="$max_allowed_memory" + fi + heap_size="${calculated_heap_size}m" + + else + heap_size=32768m + fi + fi + debug "Setting '-Xmx${heap_size} -Xms${heap_size}' heap options..." + cat >"${DB_CONF_DIR}/jvm.options.d/heap.options" < plugin + # get_plugin_name file://plugin.zip -> plugin + # get_plugin_name http://plugin-0.1.2.zip -> plugin + get_plugin_name() { + local plugin="${1:?missing plugin}" + # Remove any paths, and strip both the .zip extension and the version + basename "$plugin" | sed -E -e 's/.zip$//' -e 's/-[0-9]+\.[0-9]+(\.[0-9]+){0,}$//' + } + + # Collect plugins that should be installed offline + read -r -a mounted_plugins <<<"$(find "$DB_MOUNTED_PLUGINS_DIR" -type f -name "*.zip" -print0 | xargs -0)" + if [[ "${#mounted_plugins[@]}" -gt 0 ]]; then + for plugin in "${mounted_plugins[@]}"; do + plugins_list+=("file://${plugin}") + done + fi + + # Skip if there isn't any plugin to install + [[ -z "${plugins_list[*]:-}" ]] && return + + # Install plugins + debug "Installing plugins: ${plugins_list[*]}" + for plugin in "${plugins_list[@]}"; do + plugin_name="$(get_plugin_name "$plugin")" + [[ -n "$mandatory_plugins" ]] && mandatory_plugins="${mandatory_plugins},${plugin_name}" || mandatory_plugins="$plugin_name" + + # Check 
if the plugin was already installed + if [[ -d "${DB_PLUGINS_DIR}/${plugin_name}" ]]; then + debug "Plugin already installed: ${plugin}" + continue + fi + + debug "Installing plugin: ${plugin}" + if [[ "${BITNAMI_DEBUG:-false}" = true ]]; then + "$cmd" install -b -v "$plugin" + else + "$cmd" install -b -v "$plugin" >/dev/null 2>&1 + fi + done + + # Mark plugins as mandatory + elasticsearch_conf_set plugin.mandatory "$mandatory_plugins" +} + +######################## +# Run custom initialization scripts +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_custom_init_scripts() { + read -r -a init_scripts <<<"$(find "$DB_INITSCRIPTS_DIR" -type f -name "*.sh" -print0 | xargs -0)" + if [[ "${#init_scripts[@]}" -gt 0 ]] && [[ ! -f "$DB_VOLUME_DIR"/.user_scripts_initialized ]]; then + info "Loading user's custom files from $DB_INITSCRIPTS_DIR" + for f in "${init_scripts[@]}"; do + debug "Executing $f" + case "$f" in + *.sh) + if [[ -x "$f" ]]; then + if ! "$f"; then + error "Failed executing $f" + return 1 + fi + else + warn "Sourcing $f as it is not executable by the current user, any error may cause initialization to fail" + . 
"$f" + fi + ;; + *) + warn "Skipping $f, supported formats are: .sh" + ;; + esac + done + touch "$DB_VOLUME_DIR"/.user_scripts_initialized + fi +} + +######################## +# Modify log4j2.properties to send events to stdout instead of a logfile +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# None +######################### +elasticsearch_configure_logging() { + # Back up the original file for users who'd like to use logfile logging + cp "${DB_CONF_DIR}/log4j2.properties" "${DB_CONF_DIR}/log4j2.file.properties" + + # Replace RollingFile with Console + replace_in_file "${DB_CONF_DIR}/log4j2.properties" "RollingFile" "Console" + + local -a delete_patterns=( + # Remove RollingFile specific settings + "^.*\.policies\..*$" "^.*\.filePattern.*$" "^.*\.fileName.*$" "^.*\.strategy\..*$" + # Remove headers + "^###.*$" + # Remove .log and .json because of multiline configurations (filename) + "^\s\s.*\.log" "^\s\s.*\.json" + # Remove default rolling logger and references + "^appender\.rolling" "appenderRef\.rolling" + # Remove _old loggers + "_old\." + # Remove .filePermissions config + "\.filePermissions" + ) + for pattern in "${delete_patterns[@]}"; do + remove_in_file "${DB_CONF_DIR}/log4j2.properties" "$pattern" + done +} + +######################## +# Check Elasticsearch/Opensearch health +# Globals: +# DB_* +# Arguments: +# None +# Returns: +# 0 when healthy (or waiting for Opensearch security bootstrap) +# 1 when unhealthy +######################### +elasticsearch_healthcheck() { + info "Checking ${DB_FLAVOR^} health..." 
+ local -r cmd="curl" + local command_args=("--silent" "--write-out" "%{http_code}") + local protocol="http" + local host + + host=$(get_elasticsearch_hostname) + + if is_boolean_yes "$DB_ENABLE_SECURITY"; then + command_args+=("-k" "--user" "${DB_USERNAME}:${DB_PASSWORD}") + is_boolean_yes "$DB_ENABLE_REST_TLS" && protocol="https" + fi + + # Combination of --silent, --output and --write-out allows us to obtain both the status code and the request body + output=$(mktemp) + command_args+=("-o" "$output" "${protocol}://${host}:${DB_HTTP_PORT_NUMBER}/_cluster/health?local=true") + HTTP_CODE=$("$cmd" "${command_args[@]}") + if [[ ${HTTP_CODE} -ge 200 && ${HTTP_CODE} -le 299 ]] || ([[ "$DB_FLAVOR" = "opensearch" ]] && [[ ${HTTP_CODE} -eq 503 ]] && grep -q "OpenSearch Security not initialized" "$output" ); then + rm "$output" + return 0 + else + rm "$output" + return 1 + fi +} diff --git a/bitnami/elasticsearch/7/debian-12/tags-info.yaml b/bitnami/elasticsearch/7/debian-12/tags-info.yaml new file mode 100644 index 000000000000..06978fbe374d --- /dev/null +++ b/bitnami/elasticsearch/7/debian-12/tags-info.yaml @@ -0,0 +1,4 @@ +rolling-tags: +- "7" +- 7-debian-12 +- 7.17.18 diff --git a/bitnami/elasticsearch/README.md b/bitnami/elasticsearch/README.md index 31aad6181619..3239a2ca2c24 100644 --- a/bitnami/elasticsearch/README.md +++ b/bitnami/elasticsearch/README.md @@ -230,6 +230,7 @@ docker-compose up -d | `ELASTICSEARCH_VOLUME_DIR` | Persistence base directory | `/bitnami/elasticsearch` | | `ELASTICSEARCH_BASE_DIR` | Elasticsearch installation directory | `/opt/bitnami/elasticsearch` | | `ELASTICSEARCH_CONF_DIR` | Elasticsearch configuration directory | `${DB_BASE_DIR}/config` | +| `ELASTICSEARCH_DEFAULT_CONF_DIR` | Elasticsearch default configuration directory | `${DB_BASE_DIR}/config.default` | | `ELASTICSEARCH_LOGS_DIR` | Elasticsearch logs directory | `${DB_BASE_DIR}/logs` | | `ELASTICSEARCH_PLUGINS_DIR` | Elasticsearch plugins directory | `${DB_BASE_DIR}/plugins` | | 
`ELASTICSEARCH_DEFAULT_PLUGINS_DIR` | Elasticsearch default plugins directory | `${DB_BASE_DIR}/plugins.default` |