Remove debian-10 directory

Signed-off-by: Carlos Rodriguez Hernandez <carlosrh@vmware.com>
Author: Carlos Rodriguez Hernandez
Date: 2022-06-22 10:51:32 +00:00
Committed by: Bitnami Containers
Parent: cf7c923357
Commit: 664feafbd5
23 changed files with 0 additions and 3132 deletions


@@ -1,31 +0,0 @@
FROM docker.io/bitnami/minideb:buster
LABEL maintainer "Bitnami <containers@bitnami.com>"
ENV HOME="/" \
OS_ARCH="amd64" \
OS_FLAVOUR="debian-10" \
OS_NAME="linux"
COPY prebuildfs /
# Install required system packages and dependencies
RUN install_packages acl ca-certificates curl gzip libc6 libgcc1 procps tar
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "wait-for-port" "1.0.3-0" --checksum 0603c8eaf6d24e76563431e36e512da06bfebb3a06ede31b3e84d9879213c162
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "influxdb" "2.2.0-0" --checksum 1aac0d4dd91fbc2e7300235926c3108cf54e90aac72aed6602f6c7fce7286352
RUN . /opt/bitnami/scripts/libcomponent.sh && component_unpack "gosu" "1.14.0-7" --checksum d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1
RUN apt-get update && apt-get upgrade -y && \
rm -r /var/lib/apt/lists /var/cache/apt/archives
RUN chmod g+rwX /opt/bitnami
COPY rootfs /
RUN /opt/bitnami/scripts/influxdb/postunpack.sh
ENV APP_VERSION="2.2.0" \
BITNAMI_APP_NAME="influxdb" \
PATH="/opt/bitnami/common/bin:/opt/bitnami/influxdb/bin:$PATH"
VOLUME [ "/bitnami/influxdb" ]
EXPOSE 8086 8088
USER 1001
ENTRYPOINT [ "/opt/bitnami/scripts/influxdb/entrypoint.sh" ]
CMD [ "/opt/bitnami/scripts/influxdb/run.sh" ]


@@ -1,15 +0,0 @@
version: '2'
services:
influxdb:
image: docker.io/bitnami/influxdb:2
ports:
- 8086:8086
- 8088:8088
environment:
- INFLUXDB_ADMIN_USER_PASSWORD=bitnami123
- INFLUXDB_ADMIN_USER_TOKEN=admintoken123
volumes:
- influxdb_data:/bitnami/influxdb
volumes:
influxdb_data:
driver: local
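
For reference, a minimal way to use the compose definition above, assuming it is saved as docker-compose.yml (the filename is an assumption):

# Start InfluxDB in the background and follow its logs
docker-compose up -d
docker-compose logs -f influxdb
# The HTTP API is then reachable on the published port
curl http://localhost:8086/health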


@@ -1,23 +0,0 @@
{
"gosu": {
"arch": "amd64",
"digest": "d6280b6f647a62bf6edc74dc8e526bfff63ddd8067dcb8540843f47203d9ccf1",
"distro": "debian-10",
"type": "NAMI",
"version": "1.14.0-7"
},
"influxdb": {
"arch": "amd64",
"digest": "1aac0d4dd91fbc2e7300235926c3108cf54e90aac72aed6602f6c7fce7286352",
"distro": "debian-10",
"type": "NAMI",
"version": "2.2.0-0"
},
"wait-for-port": {
"arch": "amd64",
"digest": "0603c8eaf6d24e76563431e36e512da06bfebb3a06ede31b3e84d9879213c162",
"distro": "debian-10",
"type": "NAMI",
"version": "1.0.3-0"
}
}


@@ -1,3 +0,0 @@
Bitnami containers ship with software bundles. You can find the licenses under:
/opt/bitnami/nami/COPYING
/opt/bitnami/[name-of-bundle]/licenses/[bundle-version].txt


@@ -1,51 +0,0 @@
#!/bin/bash
#
# Bitnami custom library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Constants
BOLD='\033[1m'
# Functions
########################
# Print the welcome page
# Globals:
# DISABLE_WELCOME_MESSAGE
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_welcome_page() {
if [[ -z "${DISABLE_WELCOME_MESSAGE:-}" ]]; then
if [[ -n "$BITNAMI_APP_NAME" ]]; then
print_image_welcome_page
fi
fi
}
########################
# Print the welcome page for a Bitnami Docker image
# Globals:
# BITNAMI_APP_NAME
# Arguments:
# None
# Returns:
# None
#########################
print_image_welcome_page() {
local github_url="https://github.com/bitnami/bitnami-docker-${BITNAMI_APP_NAME}"
log ""
log "${BOLD}Welcome to the Bitnami ${BITNAMI_APP_NAME} container${RESET}"
log "Subscribe to project updates by watching ${BOLD}${github_url}${RESET}"
log "Submit issues and feature requests at ${BOLD}${github_url}/issues${RESET}"
log ""
}


@@ -1,65 +0,0 @@
#!/bin/bash
#
# Library for managing Bitnami components
# Constants
CACHE_ROOT="/tmp/bitnami/pkg/cache"
DOWNLOAD_URL="https://downloads.bitnami.com/files/stacksmith"
# Functions
########################
# Download and unpack a Bitnami package
# Globals:
# OS_NAME
# OS_ARCH
# OS_FLAVOUR
# Arguments:
# $1 - component's name
# $2 - component's version
# Returns:
# None
#########################
component_unpack() {
local name="${1:?name is required}"
local version="${2:?version is required}"
local base_name="${name}-${version}-${OS_NAME}-${OS_ARCH}-${OS_FLAVOUR}"
local package_sha256=""
local directory="/opt/bitnami"
# Validate arguments
shift 2
while [ "$#" -gt 0 ]; do
case "$1" in
-c|--checksum)
shift
package_sha256="${1:?missing package checksum}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
echo "Downloading $base_name package"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz" ]; then
echo "${CACHE_ROOT}/${base_name}.tar.gz already exists, skipping download."
cp "${CACHE_ROOT}/${base_name}.tar.gz" .
rm "${CACHE_ROOT}/${base_name}.tar.gz"
if [ -f "${CACHE_ROOT}/${base_name}.tar.gz.sha256" ]; then
echo "Using the local sha256 from ${CACHE_ROOT}/${base_name}.tar.gz.sha256"
package_sha256="$(< "${CACHE_ROOT}/${base_name}.tar.gz.sha256")"
rm "${CACHE_ROOT}/${base_name}.tar.gz.sha256"
fi
else
curl --remote-name --silent --show-error --fail "${DOWNLOAD_URL}/${base_name}.tar.gz"
fi
if [ -n "$package_sha256" ]; then
echo "Verifying package integrity"
echo "$package_sha256 ${base_name}.tar.gz" | sha256sum --check - || exit "$?"
fi
tar --directory "${directory}" --extract --gunzip --file "${base_name}.tar.gz" --no-same-owner --strip-components=2
rm "${base_name}.tar.gz"
}
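
A minimal sketch of how the Dockerfile above consumes this library; the component name, version and checksum are copied from that Dockerfile, and OS_NAME, OS_ARCH and OS_FLAVOUR are expected to be set in the image environment:

#!/bin/bash
# Sketch: download, verify and unpack the influxdb component into /opt/bitnami
. /opt/bitnami/scripts/libcomponent.sh
component_unpack "influxdb" "2.2.0-0" \
    --checksum 1aac0d4dd91fbc2e7300235926c3108cf54e90aac72aed6602f6c7fce7286352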


@@ -1,139 +0,0 @@
#!/bin/bash
#
# Library for managing files
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libos.sh
# Functions
########################
# Replace a regex-matching string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# $4 - use POSIX regex. Default: true
# Returns:
# None
#########################
replace_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local posix_regex=${4:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
if [[ $posix_regex = true ]]; then
result="$(sed -E "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
else
result="$(sed "s${del}${match_regex}${del}${substitute_regex}${del}g" "$filename")"
fi
echo "$result" > "$filename"
}
########################
# Replace a regex-matching multiline string in a file
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - substitute regex
# Returns:
# None
#########################
replace_in_file_multiline() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local substitute_regex="${3:?substitute regex is required}"
local result
local -r del=$'\001' # Use a non-printable character as a 'sed' delimiter to avoid issues
result="$(perl -pe "BEGIN{undef $/;} s${del}${match_regex}${del}${substitute_regex}${del}sg" "$filename")"
echo "$result" > "$filename"
}
########################
# Remove a line in a file based on a regex
# Arguments:
# $1 - filename
# $2 - match regex
# $3 - use POSIX regex. Default: true
# Returns:
# None
#########################
remove_in_file() {
local filename="${1:?filename is required}"
local match_regex="${2:?match regex is required}"
local posix_regex=${3:-true}
local result
# We should avoid using 'sed in-place' substitutions
# 1) They are not compatible with files mounted from ConfigMap(s)
# 2) We found incompatibility issues with Debian10 and "in-place" substitutions
if [[ $posix_regex = true ]]; then
result="$(sed -E "/$match_regex/d" "$filename")"
else
result="$(sed "/$match_regex/d" "$filename")"
fi
echo "$result" > "$filename"
}
########################
# Appends text after the last line matching a pattern
# Arguments:
# $1 - file
# $2 - match regex
# $3 - contents to add
# Returns:
# None
#########################
append_file_after_last_match() {
local file="${1:?missing file}"
local match_regex="${2:?missing pattern}"
local value="${3:?missing value}"
# We read the file in reverse, replace the first match (0,/pattern/s) and then reverse the results again
result="$(tac "$file" | sed -E "0,/($match_regex)/s||${value}\n\1|" | tac)"
echo "$result" > "$file"
}
########################
# Wait until certain entry is present in a log file
# Arguments:
# $1 - entry to look for
# $2 - log file
# $3 - max retries. Default: 12
# $4 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
wait_for_log_entry() {
local -r entry="${1:?missing entry}"
local -r log_file="${2:?missing log file}"
local -r retries="${3:-12}"
local -r interval_time="${4:-5}"
local attempt=0
check_log_file_for_entry() {
if ! grep -qE "$entry" "$log_file"; then
debug "Entry \"${entry}\" still not present in ${log_file} (attempt $((++attempt))/${retries})"
return 1
fi
}
debug "Checking that ${log_file} log file contains entry \"${entry}\""
if retry_while check_log_file_for_entry "$retries" "$interval_time"; then
debug "Found entry \"${entry}\" in ${log_file}"
true
else
error "Could not find entry \"${entry}\" in ${log_file} after ${retries} retries"
debug_execute cat "$log_file"
return 1
fi
}
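
A short usage sketch for this library; the configuration and log file paths are illustrative:

#!/bin/bash
. /opt/bitnami/scripts/libfile.sh
# Rewrite a setting without using 'sed -i' (paths are illustrative)
replace_in_file "/opt/bitnami/influxdb/etc/config.yaml" "^bind-address:.*" "bind-address: 0.0.0.0:8086"
# Drop lines starting with '#' from the same file
remove_in_file "/opt/bitnami/influxdb/etc/config.yaml" "^#"
# Block until the daemon logs readiness, retrying 12 times with 5-second pauses
wait_for_log_entry "Listening" "/opt/bitnami/influxdb/logs/influxd.log" 12 5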


@@ -1,190 +0,0 @@
#!/bin/bash
#
# Library for file system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Ensure a file/directory is owned (user and group) by the given user
# Arguments:
# $1 - filepath
# $2 - owner
# $3 - group (optional, defaults to the owner)
# Returns:
# None
#########################
owned_by() {
local path="${1:?path is missing}"
local owner="${2:?owner is missing}"
local group="${3:-}"
if [[ -n $group ]]; then
chown "$owner":"$group" "$path"
else
chown "$owner":"$owner" "$path"
fi
}
########################
# Ensure a directory exists and, optionally, is owned by the given user
# Arguments:
# $1 - directory
# $2 - owner
# Returns:
# None
#########################
ensure_dir_exists() {
local dir="${1:?directory is missing}"
local owner_user="${2:-}"
local owner_group="${3:-}"
mkdir -p "${dir}"
if [[ -n $owner_user ]]; then
owned_by "$dir" "$owner_user" "$owner_group"
fi
}
########################
# Checks whether a directory is empty or not
# Arguments:
# $1 - directory
# Returns:
# Boolean
#########################
is_dir_empty() {
local -r path="${1:?missing directory}"
# Calculate real path in order to avoid issues with symlinks
local -r dir="$(realpath "$path")"
if [[ ! -e "$dir" ]] || [[ -z "$(ls -A "$dir")" ]]; then
true
else
false
fi
}
########################
# Checks whether a mounted directory is empty or not
# Arguments:
# $1 - directory
# Returns:
# Boolean
#########################
is_mounted_dir_empty() {
local dir="${1:?missing directory}"
if is_dir_empty "$dir" || find "$dir" -mindepth 1 -maxdepth 1 -not -name ".snapshot" -not -name "lost+found" -exec false {} +; then
true
else
false
fi
}
########################
# Checks whether a file can be written to or not
# Arguments:
# $1 - file
# Returns:
# Boolean
#########################
is_file_writable() {
local file="${1:?missing file}"
local dir
dir="$(dirname "$file")"
if [[ (-f "$file" && -w "$file") || (! -f "$file" && -d "$dir" && -w "$dir") ]]; then
true
else
false
fi
}
########################
# Relativize a path
# Arguments:
# $1 - path
# $2 - base
# Returns:
# None
#########################
relativize() {
local -r path="${1:?missing path}"
local -r base="${2:?missing base}"
pushd "$base" >/dev/null || exit
realpath -q --no-symlinks --relative-base="$base" "$path" | sed -e 's|^/$|.|' -e 's|^/||'
popd >/dev/null || exit
}
########################
# Configure permissions and ownership recursively
# Globals:
# None
# Arguments:
# $1 - paths (as a string).
# Flags:
# -f|--file-mode - mode for files.
# -d|--dir-mode - mode for directories.
# -u|--user - user
# -g|--group - group
# Returns:
# None
#########################
configure_permissions_ownership() {
local -r paths="${1:?paths is missing}"
local dir_mode=""
local file_mode=""
local user=""
local group=""
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-f | --file-mode)
shift
file_mode="${1:?missing mode for files}"
;;
-d | --dir-mode)
shift
dir_mode="${1:?missing mode for directories}"
;;
-u | --user)
shift
user="${1:?missing user}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a filepaths <<<"$paths"
for p in "${filepaths[@]}"; do
if [[ -e "$p" ]]; then
if [[ -n $dir_mode ]]; then
find -L "$p" -type d -exec chmod "$dir_mode" {} \;
fi
if [[ -n $file_mode ]]; then
find -L "$p" -type f -exec chmod "$file_mode" {} \;
fi
if [[ -n $user ]] && [[ -n $group ]]; then
chown -LR "$user":"$group" "$p"
elif [[ -n $user ]] && [[ -z $group ]]; then
chown -LR "$user" "$p"
elif [[ -z $user ]] && [[ -n $group ]]; then
chgrp -LR "$group" "$p"
fi
else
stderr_print "$p does not exist"
fi
done
}
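
A short usage sketch; the directory, user and group names are illustrative:

#!/bin/bash
. /opt/bitnami/scripts/libfs.sh
# Create the persistence directory and hand it to the runtime user (ownership change requires root)
ensure_dir_exists "/bitnami/influxdb" "influxdb" "influxdb"
# Recursively normalize modes and ownership on the install tree
configure_permissions_ownership "/opt/bitnami/influxdb" -d "775" -f "664" -u "influxdb" -g "influxdb"
# Only seed initial data when the mounted volume is empty
is_mounted_dir_empty "/bitnami/influxdb" && echo "volume is empty, initializing"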


@@ -1,16 +0,0 @@
#!/bin/bash
#
# Library to use for scripts expected to be used as Kubernetes lifecycle hooks
# shellcheck disable=SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
# Override functions that log to stdout/stderr of the current process, so they print to process 1
for function_to_override in stderr_print debug_execute; do
# Output is sent to output of process 1 and thus end up in the container log
# The hook output in general isn't saved
eval "$(declare -f "$function_to_override") >/proc/1/fd/1 2>/proc/1/fd/2"
done


@@ -1,112 +0,0 @@
#!/bin/bash
#
# Library for logging functions
# Constants
RESET='\033[0m'
RED='\033[38;5;1m'
GREEN='\033[38;5;2m'
YELLOW='\033[38;5;3m'
MAGENTA='\033[38;5;5m'
CYAN='\033[38;5;6m'
# Functions
########################
# Print to STDERR
# Arguments:
# Message to print
# Returns:
# None
#########################
stderr_print() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_QUIET:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if ! [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
printf "%b\\n" "${*}" >&2
fi
}
########################
# Log message
# Arguments:
# Message to log
# Returns:
# None
#########################
log() {
stderr_print "${CYAN}${MODULE:-} ${MAGENTA}$(date "+%T.%2N ")${RESET}${*}"
}
########################
# Log an 'info' message
# Arguments:
# Message to log
# Returns:
# None
#########################
info() {
log "${GREEN}INFO ${RESET} ==> ${*}"
}
########################
# Log a 'warn' message
# Arguments:
# Message to log
# Returns:
# None
#########################
warn() {
log "${YELLOW}WARN ${RESET} ==> ${*}"
}
########################
# Log an 'error' message
# Arguments:
# Message to log
# Returns:
# None
#########################
error() {
log "${RED}ERROR${RESET} ==> ${*}"
}
########################
# Log a 'debug' message
# Globals:
# BITNAMI_DEBUG
# Arguments:
# None
# Returns:
# None
#########################
debug() {
# 'is_boolean_yes' is defined in libvalidations.sh, but depends on this file so we cannot source it
local bool="${BITNAMI_DEBUG:-false}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
log "${MAGENTA}DEBUG${RESET} ==> ${*}"
fi
}
########################
# Indent a string
# Arguments:
# $1 - string
# $2 - number of indentation characters (default: 4)
# $3 - indentation character (default: " ")
# Returns:
# None
#########################
indent() {
local string="${1:-}"
local num="${2:?missing num}"
local char="${3:-" "}"
# Build the indentation unit string
local indent_unit=""
for ((i = 0; i < num; i++)); do
indent_unit="${indent_unit}${char}"
done
# shellcheck disable=SC2001
# Complex regex, see https://github.com/koalaman/shellcheck/wiki/SC2001#exceptions
echo "$string" | sed "s/^/${indent_unit}/"
}
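
A short usage sketch of the logging helpers; BITNAMI_DEBUG and BITNAMI_QUIET control the debug and quiet behaviour as described above:

#!/bin/bash
. /opt/bitnami/scripts/liblog.sh
info "Starting initialization"
warn "No admin password supplied, using a generated one"
debug "This line only appears when BITNAMI_DEBUG is set to true/yes/1"
error "Initialization failed"
# Indent a multi-line block by four spaces before printing it
indent "$(cat /etc/os-release)" 4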


@@ -1,163 +0,0 @@
#!/bin/bash
#
# Library for network functions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Resolve IP address for a host/domain (i.e. DNS lookup)
# Arguments:
# $1 - Hostname to resolve
# $2 - IP address version (v4, v6), leave empty for resolving to any version
# Returns:
# IP
#########################
dns_lookup() {
local host="${1:?host is missing}"
local ip_version="${2:-}"
getent "ahosts${ip_version}" "$host" | awk '/STREAM/ {print $1 }' | head -n 1
}
#########################
# Wait for a hostname and return the IP
# Arguments:
# $1 - hostname
# $2 - number of retries
# $3 - seconds to wait between retries
# Returns:
# - IP address that corresponds to the hostname
#########################
wait_for_dns_lookup() {
local hostname="${1:?hostname is missing}"
local retries="${2:-5}"
local seconds="${3:-1}"
check_host() {
if [[ $(dns_lookup "$hostname") == "" ]]; then
false
else
true
fi
}
# Wait for the host to be ready
retry_while "check_host ${hostname}" "$retries" "$seconds"
dns_lookup "$hostname"
}
########################
# Get machine's IP
# Arguments:
# None
# Returns:
# Machine IP
#########################
get_machine_ip() {
local -a ip_addresses
local hostname
hostname="$(hostname)"
read -r -a ip_addresses <<< "$(dns_lookup "$hostname" | xargs echo)"
if [[ "${#ip_addresses[@]}" -gt 1 ]]; then
warn "Found more than one IP address associated to hostname ${hostname}: ${ip_addresses[*]}, will use ${ip_addresses[0]}"
elif [[ "${#ip_addresses[@]}" -lt 1 ]]; then
error "Could not find any IP address associated to hostname ${hostname}"
exit 1
fi
echo "${ip_addresses[0]}"
}
########################
# Check if the provided argument is a resolved hostname
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_hostname_resolved() {
local -r host="${1:?missing value}"
if [[ -n "$(dns_lookup "$host")" ]]; then
true
else
false
fi
}
########################
# Parse URL
# Globals:
# None
# Arguments:
# $1 - uri - String
# $2 - component to obtain. Valid options (scheme, authority, userinfo, host, port, path, query or fragment) - String
# Returns:
# String
#########################
parse_uri() {
local uri="${1:?uri is missing}"
local component="${2:?component is missing}"
# Solution based on https://tools.ietf.org/html/rfc3986#appendix-B with
# additional sub-expressions to split authority into userinfo, host and port
# Credits to Patryk Obara (see https://stackoverflow.com/a/45977232/6694969)
local -r URI_REGEX='^(([^:/?#]+):)?(//((([^@/?#]+)@)?([^:/?#]+)(:([0-9]+))?))?(/([^?#]*))?(\?([^#]*))?(#(.*))?'
# Capture group indices: 1 scheme: / 2 scheme / 3 //... / 4 authority / 5 userinfo@ / 6 userinfo / 7 host
# 8 :... / 9 port / 10 path / 11 rpath / 12 ?... / 13 query / 14 #... / 15 fragment
local index=0
case "$component" in
scheme)
index=2
;;
authority)
index=4
;;
userinfo)
index=6
;;
host)
index=7
;;
port)
index=9
;;
path)
index=10
;;
query)
index=13
;;
fragment)
index=14
;;
*)
stderr_print "unrecognized component $component"
return 1
;;
esac
[[ "$uri" =~ $URI_REGEX ]] && echo "${BASH_REMATCH[${index}]}"
}
########################
# Wait for a HTTP connection to succeed
# Globals:
# *
# Arguments:
# $1 - URL to wait for
# $2 - Maximum amount of retries (optional)
# $3 - Time between retries (optional)
# Returns:
# true if the HTTP connection succeeded, false otherwise
#########################
wait_for_http_connection() {
local url="${1:?missing url}"
local retries="${2:-}"
local sleep_time="${3:-}"
if ! retry_while "debug_execute curl --silent ${url}" "$retries" "$sleep_time"; then
error "Could not connect to ${url}"
return 1
fi
}
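
A short usage sketch; note that wait_for_http_connection relies on retry_while and debug_execute from libos.sh, so that library is sourced as well. The URL is illustrative:

#!/bin/bash
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libnet.sh
url="http://influxdb:8086/health"
host="$(parse_uri "$url" "host")"
port="$(parse_uri "$url" "port")"
echo "Waiting for ${host}:${port}"
# Retry the HTTP probe until it succeeds or the retries are exhausted
wait_for_http_connection "$url" 12 5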


@@ -1,466 +0,0 @@
#!/bin/bash
#
# Library for operating system actions
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libvalidations.sh
# Functions
########################
# Check if a user exists in the system
# Arguments:
# $1 - user
# Returns:
# Boolean
#########################
user_exists() {
local user="${1:?user is missing}"
id "$user" >/dev/null 2>&1
}
########################
# Check if a group exists in the system
# Arguments:
# $1 - group
# Returns:
# Boolean
#########################
group_exists() {
local group="${1:?group is missing}"
getent group "$group" >/dev/null 2>&1
}
########################
# Create a group in the system if it does not exist already
# Arguments:
# $1 - group
# Flags:
# -i|--gid - the ID for the new group
# -s|--system - Whether to create the new group as a system group (gid <= 999)
# Returns:
# None
#########################
ensure_group_exists() {
local group="${1:?group is missing}"
local gid=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --gid)
shift
gid="${1:?missing gid}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! group_exists "$group"; then
local -a args=("$group")
if [[ -n "$gid" ]]; then
if group_exists "$gid"; then
error "The GID $gid is already in use." >&2
return 1
fi
args+=("--gid" "$gid")
fi
$is_system_user && args+=("--system")
groupadd "${args[@]}" >/dev/null 2>&1
fi
}
########################
# Create a user in the system if it does not exist already
# Arguments:
# $1 - user
# Flags:
# -i|--uid - the ID for the new user
# -g|--group - the group the new user should belong to
# -a|--append-groups - comma-separated list of supplemental groups to append to the new user
# -h|--home - the home directory for the new user
# -s|--system - whether to create new user as system user (uid <= 999)
# Returns:
# None
#########################
ensure_user_exists() {
local user="${1:?user is missing}"
local uid=""
local group=""
local append_groups=""
local home=""
local is_system_user=false
# Validate arguments
shift 1
while [ "$#" -gt 0 ]; do
case "$1" in
-i | --uid)
shift
uid="${1:?missing uid}"
;;
-g | --group)
shift
group="${1:?missing group}"
;;
-a | --append-groups)
shift
append_groups="${1:?missing append_groups}"
;;
-h | --home)
shift
home="${1:?missing home directory}"
;;
-s | --system)
is_system_user=true
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if ! user_exists "$user"; then
local -a user_args=("-N" "$user")
if [[ -n "$uid" ]]; then
if user_exists "$uid"; then
error "The UID $uid is already in use."
return 1
fi
user_args+=("--uid" "$uid")
else
$is_system_user && user_args+=("--system")
fi
useradd "${user_args[@]}" >/dev/null 2>&1
fi
if [[ -n "$group" ]]; then
local -a group_args=("$group")
$is_system_user && group_args+=("--system")
ensure_group_exists "${group_args[@]}"
usermod -g "$group" "$user" >/dev/null 2>&1
fi
if [[ -n "$append_groups" ]]; then
local -a groups
read -ra groups <<<"$(tr ',;' ' ' <<<"$append_groups")"
for group in "${groups[@]}"; do
ensure_group_exists "$group"
usermod -aG "$group" "$user" >/dev/null 2>&1
done
fi
if [[ -n "$home" ]]; then
mkdir -p "$home"
usermod -d "$home" "$user" >/dev/null 2>&1
configure_permissions_ownership "$home" -d "775" -f "664" -u "$user" -g "$group"
fi
}
########################
# Check if the script is currently running as root
# Arguments:
# None
# Returns:
# Boolean
#########################
am_i_root() {
if [[ "$(id -u)" = "0" ]]; then
true
else
false
fi
}
########################
# Print OS metadata
# Arguments:
# $1 - Flag name
# Flags:
# --id - Distro ID
# --version - Distro version
# --branch - Distro branch
# --codename - Distro codename
# Returns:
# String
#########################
get_os_metadata() {
local -r flag_name="${1:?missing flag}"
# Helper function
get_os_release_metadata() {
local -r env_name="${1:?missing environment variable name}"
(
. /etc/os-release
echo "${!env_name}"
)
}
case "$flag_name" in
--id)
get_os_release_metadata ID
;;
--version)
get_os_release_metadata VERSION_ID
;;
--branch)
get_os_release_metadata VERSION_ID | sed 's/\..*//'
;;
--codename)
get_os_release_metadata VERSION_CODENAME
;;
*)
error "Unknown flag ${flag_name}"
return 1
;;
esac
}
########################
# Get total memory available
# Arguments:
# None
# Returns:
# Memory in megabytes
#########################
get_total_memory() {
echo $(($(grep MemTotal /proc/meminfo | awk '{print $2}') / 1024))
}
########################
# Get machine size depending on specified memory
# Globals:
# None
# Arguments:
# None
# Flags:
# --memory - memory size (optional)
# Returns:
# Detected instance size
#########################
get_machine_size() {
local memory=""
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
--memory)
shift
memory="${1:?missing memory}"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
if [[ -z "$memory" ]]; then
debug "Memory was not specified, detecting available memory automatically"
memory="$(get_total_memory)"
fi
sanitized_memory=$(convert_to_mb "$memory")
if [[ "$sanitized_memory" -gt 26000 ]]; then
echo 2xlarge
elif [[ "$sanitized_memory" -gt 13000 ]]; then
echo xlarge
elif [[ "$sanitized_memory" -gt 6000 ]]; then
echo large
elif [[ "$sanitized_memory" -gt 3000 ]]; then
echo medium
elif [[ "$sanitized_memory" -gt 1500 ]]; then
echo small
else
echo micro
fi
}
########################
# Print the list of supported machine sizes
# Globals:
# None
# Arguments:
# None
# Returns:
# List of supported machine sizes
#########################
get_supported_machine_sizes() {
echo micro small medium large xlarge 2xlarge
}
########################
# Convert memory size from string to amount of megabytes (i.e. 2G -> 2048)
# Globals:
# None
# Arguments:
# $1 - memory size
# Returns:
# Result of the conversion
#########################
convert_to_mb() {
local amount="${1:-}"
if [[ $amount =~ ^([0-9]+)(m|M|g|G) ]]; then
size="${BASH_REMATCH[1]}"
unit="${BASH_REMATCH[2]}"
if [[ "$unit" = "g" || "$unit" = "G" ]]; then
amount="$((size * 1024))"
else
amount="$size"
fi
fi
echo "$amount"
}
#########################
# Redirects output to /dev/null if debug mode is disabled
# Globals:
# BITNAMI_DEBUG
# Arguments:
# $@ - Command to execute
# Returns:
# None
#########################
debug_execute() {
if is_boolean_yes "${BITNAMI_DEBUG:-false}"; then
"$@"
else
"$@" >/dev/null 2>&1
fi
}
########################
# Retries a command a given number of times
# Arguments:
# $1 - cmd (as a string)
# $2 - max retries. Default: 12
# $3 - sleep between retries (in seconds). Default: 5
# Returns:
# Boolean
#########################
retry_while() {
local cmd="${1:?cmd is missing}"
local retries="${2:-12}"
local sleep_time="${3:-5}"
local return_value=1
read -r -a command <<<"$cmd"
for ((i = 1; i <= retries; i += 1)); do
"${command[@]}" && return_value=0 && break
sleep "$sleep_time"
done
return $return_value
}
########################
# Generate a random string
# Arguments:
# None
# Flags:
# -t|--type - String type (ascii, alphanumeric, numeric), defaults to ascii
# -c|--count - Number of characters, defaults to 32
# Returns:
# String
#########################
generate_random_string() {
local type="ascii"
local count="32"
local filter
local result
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
-t | --type)
shift
type="$1"
;;
-c | --count)
shift
count="$1"
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
# Validate type
case "$type" in
ascii)
filter="[:print:]"
;;
alphanumeric)
filter="a-zA-Z0-9"
;;
numeric)
filter="0-9"
;;
*)
echo "Invalid type ${type}" >&2
return 1
;;
esac
# Obtain count + 10 lines from /dev/urandom to ensure that the resulting string has the expected size
# Note there is a very small chance of strings starting with an EOL character;
# the more lines that are read, the less frequently this happens
result="$(head -n "$((count + 10))" /dev/urandom | tr -dc "$filter" | head -c "$count")"
echo "$result"
}
########################
# Create md5 hash from a string
# Arguments:
# $1 - string
# Returns:
# md5 hash - string
#########################
generate_md5_hash() {
local -r str="${1:?missing input string}"
echo -n "$str" | md5sum | awk '{print $1}'
}
########################
# Create a SHA hash from a string
# Arguments:
# $1 - string
# $2 - algorithm - 1 (default), 224, 256, 384, 512
# Returns:
# sha hash - string
#########################
generate_sha_hash() {
local -r str="${1:?missing input string}"
local -r algorithm="${2:-1}"
echo -n "$str" | "sha${algorithm}sum" | awk '{print $1}'
}
########################
# Converts a string to its hexadecimal representation
# Arguments:
# $1 - string
# Returns:
# hexadecimal representation of the string
#########################
convert_to_hex() {
local -r str=${1:?missing input string}
local -i iterator
local char
for ((iterator = 0; iterator < ${#str}; iterator++)); do
char=${str:iterator:1}
printf '%x' "'${char}"
done
}
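
A short usage sketch of the retry and random-string helpers; the probed URL is illustrative:

#!/bin/bash
. /opt/bitnami/scripts/libos.sh
# Retry a readiness probe up to 6 times, sleeping 10 seconds between attempts
retry_while "curl --silent --fail http://localhost:8086/health" 6 10
# Generate a 16-character alphanumeric secret and hash it with SHA-256
secret="$(generate_random_string -t alphanumeric -c 16)"
generate_sha_hash "$secret" 256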


@@ -1,122 +0,0 @@
#!/bin/bash
#
# Bitnami persistence library
# Used for bringing persistence capabilities to applications that don't have clear separation of data and logic
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libversion.sh
# Functions
########################
# Persist an application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# $2 - List of app files to persist
# Returns:
# true if all steps succeeded, false otherwise
#########################
persist_app() {
local -r app="${1:?missing app}"
local -a files_to_persist
read -r -a files_to_persist <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Persist the individual files
if [[ "${#files_to_persist[@]}" -le 0 ]]; then
warn "No files are configured to be persisted"
return
fi
pushd "$install_dir" >/dev/null || exit
local file_to_persist_relative file_to_persist_destination file_to_persist_destination_folder
local -r tmp_file="/tmp/perms.acl"
for file_to_persist in "${files_to_persist[@]}"; do
if [[ ! -f "$file_to_persist" && ! -d "$file_to_persist" ]]; then
error "Cannot persist '${file_to_persist}' because it does not exist"
return 1
fi
file_to_persist_relative="$(relativize "$file_to_persist" "$install_dir")"
file_to_persist_destination="${persist_dir}/${file_to_persist_relative}"
file_to_persist_destination_folder="$(dirname "$file_to_persist_destination")"
# Get original permissions for existing files, which will be applied later
# Exclude the root directory with 'sed', to avoid issues when copying the entirety of it to a volume
getfacl -R "$file_to_persist_relative" | sed -E '/# file: (\..+|[^.])/,$!d' > "$tmp_file"
# Copy directories to the volume
ensure_dir_exists "$file_to_persist_destination_folder"
cp -Lr --preserve=links "$file_to_persist_relative" "$file_to_persist_destination_folder"
# Restore permissions
pushd "$persist_dir" >/dev/null || exit
if am_i_root; then
setfacl --restore="$tmp_file"
else
# When running as non-root, don't change ownership
setfacl --restore=<(grep -E -v '^# (owner|group):' "$tmp_file")
fi
popd >/dev/null || exit
done
popd >/dev/null || exit
rm -f "$tmp_file"
# Install the persisted files into the installation directory, via symlinks
restore_persisted_app "$@"
}
########################
# Restore a persisted application directory
# Globals:
# BITNAMI_ROOT_DIR
# BITNAMI_VOLUME_DIR
# FORCE_MAJOR_UPGRADE
# Arguments:
# $1 - App folder name
# $2 - List of app files to restore
# Returns:
# true if all steps succeeded, false otherwise
#########################
restore_persisted_app() {
local -r app="${1:?missing app}"
local -a files_to_restore
read -r -a files_to_restore <<< "$(tr ',;:' ' ' <<< "$2")"
local -r install_dir="${BITNAMI_ROOT_DIR}/${app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
# Restore the individual persisted files
if [[ "${#files_to_restore[@]}" -le 0 ]]; then
warn "No persisted files are configured to be restored"
return
fi
local file_to_restore_relative file_to_restore_origin file_to_restore_destination
for file_to_restore in "${files_to_restore[@]}"; do
file_to_restore_relative="$(relativize "$file_to_restore" "$install_dir")"
# We use 'realpath --no-symlinks' to ensure that the case of '.' is covered and the directory is removed
file_to_restore_origin="$(realpath --no-symlinks "${install_dir}/${file_to_restore_relative}")"
file_to_restore_destination="$(realpath --no-symlinks "${persist_dir}/${file_to_restore_relative}")"
rm -rf "$file_to_restore_origin"
ln -sfn "$file_to_restore_destination" "$file_to_restore_origin"
done
}
########################
# Check if an application directory was already persisted
# Globals:
# BITNAMI_VOLUME_DIR
# Arguments:
# $1 - App folder name
# Returns:
# true if all steps succeeded, false otherwise
#########################
is_app_initialized() {
local -r app="${1:?missing app}"
local -r persist_dir="${BITNAMI_VOLUME_DIR}/${app}"
if ! is_mounted_dir_empty "$persist_dir"; then
true
else
false
fi
}
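
A short usage sketch; the application name and file list are illustrative, and BITNAMI_ROOT_DIR/BITNAMI_VOLUME_DIR are expected to be set by the image environment:

#!/bin/bash
. /opt/bitnami/scripts/libpersistence.sh
# Persist configuration and data on first boot, relink them on subsequent boots
if is_app_initialized "myapp"; then
    restore_persisted_app "myapp" "conf,data"
else
    persist_app "myapp" "conf,data"
fi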


@@ -1,273 +0,0 @@
#!/bin/bash
#
# Library for managing services
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/libvalidations.sh
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Read the provided pid file and returns a PID
# Arguments:
# $1 - Pid file
# Returns:
# PID
#########################
get_pid_from_file() {
local pid_file="${1:?pid file is missing}"
if [[ -f "$pid_file" ]]; then
if [[ -n "$(< "$pid_file")" ]] && [[ "$(< "$pid_file")" -gt 0 ]]; then
echo "$(< "$pid_file")"
fi
fi
}
########################
# Check if a provided PID corresponds to a running service
# Arguments:
# $1 - PID
# Returns:
# Boolean
#########################
is_service_running() {
local pid="${1:?pid is missing}"
kill -0 "$pid" 2>/dev/null
}
########################
# Stop a service by sending a termination signal to its pid
# Arguments:
# $1 - Pid file
# $2 - Signal number (optional)
# Returns:
# None
#########################
stop_service_using_pid() {
local pid_file="${1:?pid file is missing}"
local signal="${2:-}"
local pid
pid="$(get_pid_from_file "$pid_file")"
[[ -z "$pid" ]] || ! is_service_running "$pid" && return
if [[ -n "$signal" ]]; then
kill "-${signal}" "$pid"
else
kill "$pid"
fi
local counter=10
while [[ "$counter" -ne 0 ]] && is_service_running "$pid"; do
sleep 1
counter=$((counter - 1))
done
}
########################
# Start cron daemon
# Arguments:
# None
# Returns:
# true if started correctly, false otherwise
#########################
cron_start() {
if [[ -x "/usr/sbin/cron" ]]; then
/usr/sbin/cron
elif [[ -x "/usr/sbin/crond" ]]; then
/usr/sbin/crond
else
false
fi
}
########################
# Generate a cron configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Command
# Flags:
# --run-as - User to run as (default: root)
# --schedule - Cron schedule configuration (default: * * * * *)
# Returns:
# None
#########################
generate_cron_conf() {
local service_name="${1:?service name is missing}"
local cmd="${2:?command is missing}"
local run_as="root"
local schedule="* * * * *"
local clean="true"
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--run-as)
shift
run_as="$1"
;;
--schedule)
shift
schedule="$1"
;;
--no-clean)
clean="false"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p /etc/cron.d
if "$clean"; then
echo "${schedule} ${run_as} ${cmd}" > /etc/cron.d/"$service_name"
else
echo "${schedule} ${run_as} ${cmd}" >> /etc/cron.d/"$service_name"
fi
}
########################
# Remove a cron configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_cron_conf() {
local service_name="${1:?service name is missing}"
local cron_conf_dir="/etc/cron.d"
rm -f "${cron_conf_dir}/${service_name}"
}
########################
# Generate a monit configuration file for a given service
# Arguments:
# $1 - Service name
# $2 - Pid file
# $3 - Start command
# $4 - Stop command
# Flags:
# --disable - Whether to disable the monit configuration
# Returns:
# None
#########################
generate_monit_conf() {
local service_name="${1:?service name is missing}"
local pid_file="${2:?pid file is missing}"
local start_command="${3:?start command is missing}"
local stop_command="${4:?stop command is missing}"
local monit_conf_dir="/etc/monit/conf.d"
local disabled="no"
# Parse optional CLI flags
shift 4
while [[ "$#" -gt 0 ]]; do
case "$1" in
--disable)
disabled="yes"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
is_boolean_yes "$disabled" && conf_suffix=".disabled"
mkdir -p "$monit_conf_dir"
cat >"${monit_conf_dir}/${service_name}.conf${conf_suffix:-}" <<EOF
check process ${service_name}
with pidfile "${pid_file}"
start program = "${start_command}" with timeout 90 seconds
stop program = "${stop_command}" with timeout 90 seconds
EOF
}
########################
# Remove a monit configuration file for a given service
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_monit_conf() {
local service_name="${1:?service name is missing}"
local monit_conf_dir="/etc/monit/conf.d"
rm -f "${monit_conf_dir}/${service_name}.conf"
}
########################
# Generate a logrotate configuration file
# Arguments:
# $1 - Service name
# $2 - Log files pattern
# Flags:
# --period - Period
# --rotations - Number of rotations to store
# --extra - Extra options (Optional)
# Returns:
# None
#########################
generate_logrotate_conf() {
local service_name="${1:?service name is missing}"
local log_path="${2:?log path is missing}"
local period="weekly"
local rotations="150"
local extra=""
local logrotate_conf_dir="/etc/logrotate.d"
local var_name
# Parse optional CLI flags
shift 2
while [[ "$#" -gt 0 ]]; do
case "$1" in
--period|--rotations|--extra)
var_name="$(echo "$1" | sed -e "s/^--//" -e "s/-/_/g")"
shift
declare "$var_name"="${1:?"$var_name" is missing}"
;;
*)
echo "Invalid command line flag ${1}" >&2
return 1
;;
esac
shift
done
mkdir -p "$logrotate_conf_dir"
cat <<EOF | sed '/^\s*$/d' >"${logrotate_conf_dir}/${service_name}"
${log_path} {
${period}
rotate ${rotations}
dateext
compress
copytruncate
missingok
$(indent "$extra" 2)
}
EOF
}
########################
# Remove a logrotate configuration file
# Arguments:
# $1 - Service name
# Returns:
# None
#########################
remove_logrotate_conf() {
local service_name="${1:?service name is missing}"
local logrotate_conf_dir="/etc/logrotate.d"
rm -f "${logrotate_conf_dir}/${service_name}"
}
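
A short usage sketch; the service names, commands and schedules are illustrative:

#!/bin/bash
. /opt/bitnami/scripts/libservice.sh
# Run a cleanup job nightly at 02:00 as the application user
generate_cron_conf "myapp-cleanup" "/opt/bitnami/myapp/bin/cleanup.sh" --run-as "myapp" --schedule "0 2 * * *"
# Rotate the application logs weekly, keeping 14 rotations
generate_logrotate_conf "myapp" "/opt/bitnami/myapp/logs/*.log" --period weekly --rotations 14
# Supervise the daemon with monit
generate_monit_conf "myapp" "/opt/bitnami/myapp/tmp/myapp.pid" \
    "/opt/bitnami/scripts/myapp/start.sh" "/opt/bitnami/scripts/myapp/stop.sh"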


@@ -1,264 +0,0 @@
#!/bin/bash
#
# Validation functions library
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Check if the provided argument is an integer
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_int() {
local -r int="${1:?missing value}"
if [[ "$int" =~ ^-?[0-9]+ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a positive integer
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_positive_int() {
local -r int="${1:?missing value}"
if is_int "$int" && (( "${int}" >= 0 )); then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean or is the string 'yes/true'
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_boolean_yes() {
local -r bool="${1:-}"
# comparison is performed without regard to the case of alphabetic characters
shopt -s nocasematch
if [[ "$bool" = 1 || "$bool" =~ ^(yes|true)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean yes/no value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_yes_no_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(yes|no)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean true/false value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_true_false_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^(true|false)$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is a boolean 1/0 value
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_1_0_value() {
local -r bool="${1:-}"
if [[ "$bool" =~ ^[10]$ ]]; then
true
else
false
fi
}
########################
# Check if the provided argument is an empty string or not defined
# Arguments:
# $1 - Value to check
# Returns:
# Boolean
#########################
is_empty_value() {
local -r val="${1:-}"
if [[ -z "$val" ]]; then
true
else
false
fi
}
########################
# Validate if the provided argument is a valid port
# Arguments:
# $1 - Port to validate
# Returns:
# Boolean and error message
#########################
validate_port() {
local value
local unprivileged=0
# Parse flags
while [[ "$#" -gt 0 ]]; do
case "$1" in
-unprivileged)
unprivileged=1
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [[ "$#" -gt 1 ]]; then
echo "too many arguments provided"
return 2
elif [[ "$#" -eq 0 ]]; then
stderr_print "missing port argument"
return 1
else
value=$1
fi
if [[ -z "$value" ]]; then
echo "the value is empty"
return 1
else
if ! is_int "$value"; then
echo "value is not an integer"
return 2
elif [[ "$value" -lt 0 ]]; then
echo "negative value provided"
return 2
elif [[ "$value" -gt 65535 ]]; then
echo "requested port is greater than 65535"
return 2
elif [[ "$unprivileged" = 1 && "$value" -lt 1024 ]]; then
echo "privileged port requested"
return 3
fi
fi
}
########################
# Validate if the provided argument is a valid IPv4 address
# Arguments:
# $1 - IP to validate
# Returns:
# Boolean
#########################
validate_ipv4() {
local ip="${1:?ip is missing}"
local stat=1
if [[ $ip =~ ^[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}\.[0-9]{1,3}$ ]]; then
read -r -a ip_array <<< "$(tr '.' ' ' <<< "$ip")"
[[ ${ip_array[0]} -le 255 && ${ip_array[1]} -le 255 \
&& ${ip_array[2]} -le 255 && ${ip_array[3]} -le 255 ]]
stat=$?
fi
return $stat
}
########################
# Validate a string format
# Arguments:
# $1 - String to validate
# Returns:
# Boolean
#########################
validate_string() {
local string
local min_length=-1
local max_length=-1
# Parse flags
while [ "$#" -gt 0 ]; do
case "$1" in
-min-length)
shift
min_length=${1:-}
;;
-max-length)
shift
max_length=${1:-}
;;
--)
shift
break
;;
-*)
stderr_print "unrecognized flag $1"
return 1
;;
*)
break
;;
esac
shift
done
if [ "$#" -gt 1 ]; then
stderr_print "too many arguments provided"
return 2
elif [ "$#" -eq 0 ]; then
stderr_print "missing string"
return 1
else
string=$1
fi
if [[ "$min_length" -ge 0 ]] && [[ "${#string}" -lt "$min_length" ]]; then
echo "string length is less than $min_length"
return 1
fi
if [[ "$max_length" -ge 0 ]] && [[ "${#string}" -gt "$max_length" ]]; then
echo "string length is great than $max_length"
return 1
fi
}
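
A short usage sketch; MYAPP_PORT_NUMBER and MYAPP_ENABLE_TLS are illustrative environment variables:

#!/bin/bash
. /opt/bitnami/scripts/libvalidations.sh
# Validate a user-supplied port, requiring an unprivileged (>= 1024) value
if ! err="$(validate_port -unprivileged "${MYAPP_PORT_NUMBER:-8086}")"; then
    echo "Invalid port: ${err}" >&2
    exit 1
fi
# Treat yes/true/1 (case-insensitive) uniformly as "enabled"
is_boolean_yes "${MYAPP_ENABLE_TLS:-no}" && echo "TLS enabled"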


@@ -1,49 +0,0 @@
#!/bin/bash
#
# Library for managing versions strings
# shellcheck disable=SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
# Functions
########################
# Gets semantic version
# Arguments:
# $1 - version: string to extract major.minor.patch
# $2 - section: 1 to extract major, 2 to extract minor, 3 to extract patch
# Returns:
# The requested section of the version (major, minor or patch)
#########################
get_sematic_version () {
local version="${1:?version is required}"
local section="${2:?section is required}"
local -a version_sections
#Regex to parse versions: x.y.z
local -r regex='([0-9]+)(\.([0-9]+)(\.([0-9]+))?)?'
if [[ "$version" =~ $regex ]]; then
local i=1
local j=1
local n=${#BASH_REMATCH[*]}
while [[ $i -lt $n ]]; do
if [[ -n "${BASH_REMATCH[$i]}" ]] && [[ "${BASH_REMATCH[$i]:0:1}" != '.' ]]; then
version_sections[$j]=${BASH_REMATCH[$i]}
((j++))
fi
((i++))
done
local number_regex='^[0-9]+$'
if [[ "$section" =~ $number_regex ]] && (( section > 0 )) && (( section <= 3 )); then
echo "${version_sections[$section]}"
return
else
stderr_print "Section allowed values are: 1, 2, and 3"
return 1
fi
fi
}
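
A short usage sketch (note the function is named get_sematic_version in the library):

#!/bin/bash
. /opt/bitnami/scripts/libversion.sh
# Extract the major, minor and patch sections of a version string
major="$(get_sematic_version "2.2.0" 1)"   # 2
minor="$(get_sematic_version "2.2.0" 2)"   # 2
patch="$(get_sematic_version "2.2.0" 3)"   # 0
echo "${major}.${minor}.${patch}"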


@@ -1,458 +0,0 @@
#!/bin/bash
#
# Bitnami web server handler library
# shellcheck disable=SC1090,SC1091
# Load generic libraries
. /opt/bitnami/scripts/liblog.sh
########################
# Execute a command (or list of commands) with the web server environment and library loaded
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_execute() {
local -r web_server="${1:?missing web server}"
shift
# Run program in sub-shell to avoid web server environment getting loaded when not necessary
(
. "/opt/bitnami/scripts/lib${web_server}.sh"
. "/opt/bitnami/scripts/${web_server}-env.sh"
"$@"
)
}
########################
# Prints the list of enabled web servers
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_list() {
local -r -a supported_web_servers=(apache nginx)
local -a existing_web_servers=()
for web_server in "${supported_web_servers[@]}"; do
[[ -f "/opt/bitnami/scripts/${web_server}-env.sh" ]] && existing_web_servers+=("$web_server")
done
echo "${existing_web_servers[@]:-}"
}
########################
# Prints the currently-enabled web server type (only one, in order of preference)
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_type() {
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
echo "${web_servers[0]:-}"
}
########################
# Validate that a supported web server is configured
# Globals:
# None
# Arguments:
# None
# Returns:
# None
#########################
web_server_validate() {
local error_code=0
local supported_web_servers=("apache" "nginx")
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
if [[ -z "$(web_server_type)" || ! " ${supported_web_servers[*]} " == *" $(web_server_type) "* ]]; then
print_validation_error "Could not detect any supported web servers. It must be one of: ${supported_web_servers[*]}"
elif ! web_server_execute "$(web_server_type)" type -t "is_$(web_server_type)_running" >/dev/null; then
print_validation_error "Could not load the $(web_server_type) web server library from /opt/bitnami/scripts. Check that it exists and is readable."
fi
return "$error_code"
}
########################
# Check whether the web server is running
# Globals:
# *
# Arguments:
# None
# Returns:
# true if the web server is running, false otherwise
#########################
is_web_server_running() {
"is_$(web_server_type)_running"
}
########################
# Start web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_start() {
info "Starting $(web_server_type) in background"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/start.sh"
}
########################
# Stop web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_stop() {
info "Stopping $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/stop.sh"
}
########################
# Restart web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_restart() {
info "Restarting $(web_server_type)"
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/restart.sh"
}
########################
# Reload web server
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_reload() {
"${BITNAMI_ROOT_DIR}/scripts/$(web_server_type)/reload.sh"
}
########################
# Ensure a web server application configuration exists (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --type - Application type, which has an effect on which configuration template to use
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --disable - Whether to render server configurations with a .disabled prefix
# --disable-http - Whether to render the app's HTTP server configuration with a .disabled prefix
# --disable-https - Whether to render the app's HTTPS server configuration with a .disabled prefix
# --http-port - HTTP port number
# --https-port - HTTPS port number
# --document-root - Path to document root directory
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-additional-http-configuration - Additional HTTP vhost configuration (no default)
# --apache-additional-https-configuration - Additional HTTPS vhost configuration (no default)
# --apache-before-vhost-configuration - Configuration to add before the <VirtualHost> directive (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no' and type is not defined)
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-proxy-address - Address where to proxy requests
# --apache-proxy-configuration - Extra configuration for the proxy
# --apache-proxy-http-configuration - Extra configuration for the proxy HTTP vhost
# --apache-proxy-https-configuration - Extra configuration for the proxy HTTPS vhost
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup (only allowed when type is not defined)
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# --nginx-external-configuration - Configuration external to server block (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_app_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--disable \
| --disable-http \
| --disable-https \
)
apache_args+=("$1")
nginx_args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --type \
| --allow-remote-connections \
| --http-port \
| --https-port \
| --document-root \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-additional-http-configuration \
| --apache-additional-https-configuration \
| --apache-before-vhost-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-proxy-address \
| --apache-proxy-configuration \
| --apache-proxy-http-configuration \
| --apache-proxy-https-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "${2:?missing value}")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration \
| --nginx-external-configuration)
nginx_args+=("${1//nginx-/}" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_exists" "${!args_var}"
done
}
########################
# Ensure a web server application configuration does not exist anymore (i.e. Apache virtual host format or NGINX server block)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Returns:
# true if the configuration was disabled, false otherwise
########################
ensure_web_server_app_configuration_not_exists() {
local app="${1:?missing app}"
local -a web_servers
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "ensure_${web_server}_app_configuration_not_exists" "$app"
done
}
########################
# Ensure the web server loads the configuration for an application in a URL prefix
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --allow-remote-connections - Whether to allow remote connections or to require local connections
# --document-root - Path to document root directory
# --prefix - URL prefix from where it will be accessible (i.e. /myapp)
# --type - Application type, which has an effect on what configuration template will be used
# Apache-specific flags:
# --apache-additional-configuration - Additional vhost configuration (no default)
# --apache-allow-override - Whether to allow .htaccess files (only allowed when --move-htaccess is set to 'no')
# --apache-extra-directory-configuration - Extra configuration for the document root directory
# --apache-move-htaccess - Move .htaccess files to a common place so they can be loaded during Apache startup
# NGINX-specific flags:
# --nginx-additional-configuration - Additional server block configuration (no default)
# Returns:
# true if the configuration was enabled, false otherwise
########################
ensure_web_server_prefix_configuration_exists() {
local app="${1:?missing app}"
shift
local -a apache_args nginx_args web_servers args_var
apache_args=("$app")
nginx_args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--allow-remote-connections \
| --document-root \
| --prefix \
| --type \
)
apache_args+=("$1" "${2:?missing value}")
nginx_args+=("$1" "${2:?missing value}")
shift
;;
# Specific Apache flags
--apache-additional-configuration \
| --apache-allow-override \
| --apache-extra-directory-configuration \
| --apache-move-htaccess \
)
apache_args+=("${1//apache-/}" "$2")
shift
;;
# Specific NGINX flags
--nginx-additional-configuration)
nginx_args+=("${1//nginx-/}" "$2")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
args_var="${web_server}_args[@]"
web_server_execute "$web_server" "ensure_${web_server}_prefix_configuration_exists" "${!args_var}"
done
}
########################
# Ensure a web server application configuration is updated with the runtime configuration (i.e. ports)
# It serves as a wrapper for the specific web server function
# Globals:
# *
# Arguments:
# $1 - App name
# Flags:
# --hosts - Host listen addresses
# --server-name - Server name
# --server-aliases - Server aliases
# --enable-http - Enable HTTP app configuration (if not enabled already)
# --enable-https - Enable HTTPS app configuration (if not enabled already)
# --disable-http - Disable HTTP app configuration (if not disabled already)
# --disable-https - Disable HTTPS app configuration (if not disabled already)
# --http-port - HTTP port number
# --https-port - HTTPS port number
# Returns:
# true if the configuration was updated, false otherwise
########################
web_server_update_app_configuration() {
local app="${1:?missing app}"
shift
local -a args web_servers
args=("$app")
# Validate arguments
while [[ "$#" -gt 0 ]]; do
case "$1" in
# Common flags
--enable-http \
| --enable-https \
| --disable-http \
| --disable-https \
)
args+=("$1")
;;
--hosts \
| --server-name \
| --server-aliases \
| --http-port \
| --https-port \
)
args+=("$1" "${2:?missing value}")
shift
;;
*)
echo "Invalid command line flag $1" >&2
return 1
;;
esac
shift
done
read -r -a web_servers <<< "$(web_server_list)"
for web_server in "${web_servers[@]}"; do
web_server_execute "$web_server" "${web_server}_update_app_configuration" "${args[@]}"
done
}
########################
# Enable loading page, which shows users that the initialization process is not yet completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_enable_loading_page() {
ensure_web_server_app_configuration_exists "__loading" --hosts "_default_" \
--apache-additional-configuration "
# Show a HTTP 503 Service Unavailable page by default
RedirectMatch 503 ^/$
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
ErrorDocument 404 /index.html
ErrorDocument 503 /index.html" \
--nginx-additional-configuration "
# Show a HTTP 503 Service Unavailable page by default
location / {
return 503;
}
# Show index.html if server is answering with 404 Not Found or 503 Service Unavailable status codes
error_page 404 @installing;
error_page 503 @installing;
location @installing {
rewrite ^(.*)$ /index.html break;
}"
web_server_reload
}
########################
# Disable the loading page once the initialization process has completed
# Globals:
# *
# Arguments:
# None
# Returns:
# None
#########################
web_server_disable_install_page() {
ensure_web_server_app_configuration_not_exists "__loading"
web_server_reload
}
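# Example (illustrative sketch only): a typical initialization flow could wrap a
# long-running setup step between the two helpers above, so users see the loading
# page until setup finishes. The setup function name below is an assumption.
#   web_server_enable_loading_page
#   run_application_setup   # hypothetical long-running initialization step
#   web_server_disable_install_page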

View File

@@ -1,24 +0,0 @@
#!/bin/sh
set -e
set -u
export DEBIAN_FRONTEND=noninteractive
n=0
max=2
until [ $n -gt $max ]; do
set +e
(
apt-get update -qq &&
apt-get install -y --no-install-recommends "$@"
)
CODE=$?
set -e
if [ $CODE -eq 0 ]; then
break
fi
if [ $n -eq $max ]; then
exit $CODE
fi
echo "apt failed, retrying"
n=$(($n + 1))
done
rm -r /var/lib/apt/lists /var/cache/apt/archives
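# Example (illustrative only): this retry wrapper takes the package names to
# install as its arguments. Assuming it is saved as an executable helper script
# (the script name below is an assumption, it is not shown in this hunk), it
# could be invoked as:
#   install_packages ca-certificates curl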

View File

@@ -1,27 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libbitnami.sh
. /opt/bitnami/scripts/libinfluxdb.sh
# Load InfluxDB environment variables
eval "$(influxdb_env)"
print_welcome_page
if [[ "$*" = *"/opt/bitnami/scripts/influxdb/run.sh"* ]]; then
info "** Starting InfluxDB setup **"
/opt/bitnami/scripts/influxdb/setup.sh
info "** InfluxDB setup finished! **"
fi
echo ""
exec "$@"

View File

@@ -1,18 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
# Load libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libinfluxdb.sh
# Load InfluxDB environment variables
eval "$(influxdb_env)"
# Ensure directories used by InfluxDB exist and have proper ownership and permissions
for dir in "$INFLUXDB_VOLUME_DIR" "$INFLUXDB_DATA_DIR" "$INFLUXDB_DATA_WAL_DIR" "$INFLUXDB_META_DIR" "$INFLUXDB_CONF_DIR" "$INFLUXDB_INITSCRIPTS_DIR"; do
ensure_dir_exists "$dir"
chmod -R g+rwX "$dir"
done
touch "$HOME/.influx_history" && chmod g+rwX "$HOME/.influx_history"

View File

@@ -1,28 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libinfluxdb.sh
# Load InfluxDB environment variables
eval "$(influxdb_env)"
info "** Starting InfluxDB **"
start_command=("${INFLUXDB_BIN_DIR}/influxd" "$@")
am_i_root && start_command=("gosu" "$INFLUXDB_DAEMON_USER" "${start_command[@]}")
if [[ -f "$INFLUXDB_CONF_FILE" ]]; then
export INFLUXD_CONFIG_PATH="${INFLUXDB_CONF_FILE:-}"
fi
export HOME=/bitnami/influxdb/
exec "${start_command[@]}"

View File

@@ -1,30 +0,0 @@
#!/bin/bash
# shellcheck disable=SC1091
set -o errexit
set -o nounset
set -o pipefail
# set -o xtrace # Uncomment this line for debugging purposes
# Load libraries
. /opt/bitnami/scripts/libfs.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libinfluxdb.sh
# Load InfluxDB environment variables
eval "$(influxdb_env)"
# Ensure InfluxDB environment variables are valid
influxdb_validate
# Ensure InfluxDB user and group exist when running as 'root'
if am_i_root; then
ensure_user_exists "$INFLUXDB_DAEMON_USER" --group "$INFLUXDB_DAEMON_GROUP"
chown -R "$INFLUXDB_DAEMON_USER" "$INFLUXDB_DATA_DIR" "$INFLUXDB_CONF_DIR"
fi
# Ensure InfluxDB is stopped when this script ends.
trap "influxdb_stop" EXIT
# Ensure InfluxDB is initialized
influxdb_initialize
# Allow running custom initialization scripts
influxdb_custom_init_scripts

View File

@@ -1,565 +0,0 @@
#!/bin/bash
#
# Bitnami InfluxDB library
# shellcheck disable=SC1090,SC1091
# Load Generic Libraries
. /opt/bitnami/scripts/liblog.sh
. /opt/bitnami/scripts/libos.sh
. /opt/bitnami/scripts/libvalidations.sh
# Functions
########################
# Load global variables used on InfluxDB configuration
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# Series of exports to be used as 'eval' arguments
#########################
influxdb_env() {
cat <<"EOF"
# Format log messages
export MODULE="influxdb"
export BITNAMI_DEBUG="${BITNAMI_DEBUG:-false}"
# Paths
export INFLUXDB_BASE_DIR="/opt/bitnami/influxdb"
export INFLUXDB_VOLUME_DIR="/bitnami/influxdb"
export INFLUXDB_BIN_DIR="${INFLUXDB_BASE_DIR}/bin"
export INFLUXDB_DATA_DIR="${INFLUXDB_DATA_DIR:-${INFLUXDB_VOLUME_DIR}/data}"
export INFLUXDB_DATA_WAL_DIR="${INFLUXDB_DATA_WAL_DIR:-${INFLUXDB_VOLUME_DIR}/wal}"
export INFLUXDB_META_DIR="${INFLUXDB_META_DIR:-${INFLUXDB_VOLUME_DIR}/meta}"
export INFLUXDB_CONF_DIR="${INFLUXDB_BASE_DIR}/etc"
export INFLUXDB_CONF_FILE="${INFLUXDB_CONF_DIR}/influxdb.conf"
export INFLUXDB_INITSCRIPTS_DIR="/docker-entrypoint-initdb.d"
# Users
export INFLUXDB_DAEMON_USER="influxdb"
export INFLUXDB_DAEMON_GROUP="influxdb"
# InfluxDB settings
export INFLUXDB_REPORTING_DISABLED="${INFLUXDB_REPORTING_DISABLED:-true}"
export INFLUXDB_HTTP_PORT_NUMBER="${INFLUXDB_HTTP_PORT_NUMBER:-8086}"
export INFLUXDB_HTTP_BIND_ADDRESS="${INFLUXDB_HTTP_BIND_ADDRESS:-0.0.0.0:${INFLUXDB_HTTP_PORT_NUMBER}}"
export INFLUXDB_HTTP_READINESS_TIMEOUT="${INFLUXDB_HTTP_READINESS_TIMEOUT:-60}"
export INFLUXDB_PORT_NUMBER="${INFLUXDB_PORT_NUMBER:-8088}"
export INFLUXDB_BIND_ADDRESS="${INFLUXDB_BIND_ADDRESS:-0.0.0.0:${INFLUXDB_PORT_NUMBER}}"
export INFLUXDB_PORT_READINESS_TIMEOUT="${INFLUXDB_PORT_READINESS_TIMEOUT:-30}"
# Authentication
export INFLUXDB_ADMIN_USER="${INFLUXDB_ADMIN_USER:-admin}"
export INFLUXDB_ADMIN_CONFIG_NAME="${INFLUXDB_ADMIN_CONFIG_NAME:-default}"
export INFLUXDB_ADMIN_ORG="${INFLUXDB_ADMIN_ORG:-primary}"
export INFLUXDB_ADMIN_BUCKET="${INFLUXDB_ADMIN_BUCKET:-primary}"
export INFLUXDB_ADMIN_RETENTION="${INFLUXDB_ADMIN_RETENTION:-0}"
export INFLUXDB_USER="${INFLUXDB_USER:-}"
export INFLUXDB_USER_ORG="${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}"
export INFLUXDB_USER_BUCKET="${INFLUXDB_USER_BUCKET:-}"
export INFLUXDB_CREATE_USER_TOKEN="${INFLUXDB_CREATE_USER_TOKEN:-no}"
export INFLUXDB_READ_USER="${INFLUXDB_READ_USER:-}"
export INFLUXDB_WRITE_USER="${INFLUXDB_WRITE_USER:-}"
export INFLUXDB_DB="${INFLUXDB_DB:-}"
# V2 required env vars aliases
export INFLUXD_ENGINE_PATH="${INFLUXDB_VOLUME_DIR}"
export INFLUXD_BOLT_PATH="${INFLUXDB_VOLUME_DIR}/influxd.bolt"
export INFLUXD_CONFIG_PATH="${INFLUXDB_CONF_DIR}/influxdb.conf"
export INFLUX_CONFIGS_PATH="${INFLUXDB_VOLUME_DIR}/configs"
export INFLUXD_HTTP_BIND_ADDRESS="${INFLUXDB_HTTP_BIND_ADDRESS}"
EOF
# The configuration can be provided in a configuration file or via environment variables
# This setting is necessary to determine certain validations/actions during the
# initialization, so we need to check the config file when it exists.
if [[ -f "/opt/bitnami/influxdb/etc/influxdb.conf" ]]; then
cat <<"EOF"
INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-$(influxdb_conf_get "auth-enabled")}"
export INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-true}"
EOF
else
cat <<"EOF"
export INFLUXDB_HTTP_AUTH_ENABLED="${INFLUXDB_HTTP_AUTH_ENABLED:-true}"
EOF
fi
# Credentials can be mounted as files so that sensitive data does not need to
# be exposed through environment variables
if [[ -f "${INFLUXDB_ADMIN_USER_PASSWORD_FILE:-}" ]]; then
cat <<"EOF"
export INFLUXDB_ADMIN_USER_PASSWORD="$(< "${INFLUXDB_ADMIN_USER_PASSWORD_FILE}")"
EOF
else
cat <<"EOF"
export INFLUXDB_ADMIN_USER_PASSWORD="${INFLUXDB_ADMIN_USER_PASSWORD:-}"
EOF
fi
if [[ -f "${INFLUXDB_ADMIN_USER_TOKEN_FILE:-}" ]]; then
cat <<"EOF"
export INFLUXDB_ADMIN_USER_TOKEN="$(< "${INFLUXDB_ADMIN_USER_TOKEN_FILE}")"
EOF
else
cat <<"EOF"
export INFLUXDB_ADMIN_USER_TOKEN="${INFLUXDB_ADMIN_USER_TOKEN:-}"
EOF
fi
if [[ -f "${INFLUXDB_USER_PASSWORD_FILE:-}" ]]; then
cat <<"EOF"
export INFLUXDB_USER_PASSWORD="$(< "${INFLUXDB_USER_PASSWORD_FILE}")"
EOF
else
cat <<"EOF"
export INFLUXDB_USER_PASSWORD="${INFLUXDB_USER_PASSWORD:-}"
EOF
fi
if [[ -f "${INFLUXDB_READ_USER_PASSWORD_FILE:-}" ]]; then
cat <<"EOF"
export INFLUXDB_READ_USER_PASSWORD="$(< "${INFLUXDB_READ_USER_PASSWORD_FILE}")"
EOF
else
cat <<"EOF"
export INFLUXDB_READ_USER_PASSWORD="${INFLUXDB_READ_USER_PASSWORD:-}"
EOF
fi
if [[ -f "${INFLUXDB_WRITE_USER_PASSWORD_FILE:-}" ]]; then
cat <<"EOF"
export INFLUXDB_WRITE_USER_PASSWORD="$(< "${INFLUXDB_WRITE_USER_PASSWORD_FILE}")"
EOF
else
cat <<"EOF"
export INFLUXDB_WRITE_USER_PASSWORD="${INFLUXDB_WRITE_USER_PASSWORD:-}"
EOF
fi
}
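# Example (illustrative sketch only): the *_FILE variants handled above let
# credentials be mounted as files instead of plain environment variables. A
# container could be started along these lines (the secret path and file name
# are assumptions for demonstration):
#   docker run \
#       -e INFLUXDB_ADMIN_USER_PASSWORD_FILE=/run/secrets/influxdb-password \
#       -v ./influxdb-password.txt:/run/secrets/influxdb-password:ro \
#       bitnami/influxdb:latest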
########################
# Validate settings in INFLUXDB_* env vars
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_validate() {
local error_code=0
debug "Validating settings in INFLUXDB_* env vars..."
# Auxiliary functions
print_validation_error() {
error "$1"
error_code=1
}
check_password_file() {
if ! is_empty_value "${!1:-}" && ! [[ -f "${!1:-}" ]]; then
print_validation_error "The variable $1 is defined but the file ${!1} is not accessible or does not exist."
fi
}
check_true_false_value() {
if ! is_true_false_value "${!1}"; then
print_validation_error "The allowed values for $1 are [true, false]"
fi
}
check_conflicting_ports() {
local -r total="$#"
for i in $(seq 1 "$((total - 1))"); do
for j in $(seq "$((i + 1))" "$total"); do
if [[ "${!i}" -eq "${!j}" ]]; then
print_validation_error "${!i} and ${!j} are bound to the same port"
fi
done
done
}
# InfluxDB secret files validations
local -a pwd_file_envs=("INFLUXDB_ADMIN_USER_PASSWORD_FILE" "INFLUXDB_ADMIN_USER_TOKEN_FILE" "INFLUXDB_USER_PASSWORD_FILE")
for pwd_file in "${pwd_file_envs[@]}"; do
check_password_file "$pwd_file"
done
# InfluxDB authentication validations
if [[ -z "${INFLUXDB_ADMIN_USER_PASSWORD:-}" ]]; then
print_validation_error "Primary config authentication is required. Please, specify a password for the ${INFLUXDB_ADMIN_USER} user by setting the 'INFLUXDB_ADMIN_USER_PASSWORD' or 'INFLUXDB_ADMIN_USER_PASSWORD_FILE' environment variables."
fi
if [[ -z "${INFLUXDB_ADMIN_USER_TOKEN:-}" ]]; then
print_validation_error "Primary config authentication is required. Please, specify a token for the ${INFLUXDB_ADMIN_USER} user by setting the 'INFLUXDB_ADMIN_USER_TOKEN' or 'INFLUXDB_ADMIN_USER_TOKEN_FILE' environment variables."
fi
if [[ -n "${INFLUXDB_USER:-}" ]] && [[ -z "${INFLUXDB_USER_PASSWORD:-}" ]]; then
print_validation_error "User authentication is required. Please, specify a password for the ${INFLUXDB_USER} user by setting the 'INFLUXDB_USER_PASSWORD' or 'INFLUXDB_USER_PASSWORD_FILE' environment variables."
fi
# InfluxDB port validations
local -a ports_envs=("INFLUXDB_PORT_NUMBER" "INFLUXDB_HTTP_PORT_NUMBER")
for p in "${ports_envs[@]}"; do
if ! is_empty_value "${!p}" && ! err=$(validate_port -unprivileged "${!p}"); then
print_validation_error "An invalid port was specified in the environment variable ${p}: ${err}"
fi
done
check_conflicting_ports "${ports_envs[@]}"
[[ "$error_code" -eq 0 ]] || exit "$error_code"
}
########################
# Get a property's value from the influxdb.conf file
# Globals:
# INFLUXDB_*
# Arguments:
# $1 - key
# $2 - section
# Returns:
# None
#########################
# TODO: use a golang binary (toml-parser)
influxdb_conf_get() {
local -r key="${1:?missing key}"
# local -r section="${2:?missing section}"
sed -n -e "s/^ *$key *= *//p" "$INFLUXDB_CONF_FILE"
# toml-parser -r "$section" "$key" "$INFLUXDB_CONF_FILE"
}
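# Example (illustrative only): reading a single key from influxdb.conf, as done
# when deriving INFLUXDB_HTTP_AUTH_ENABLED in influxdb_env. The key is taken
# from that usage; the variable name is an assumption.
#   auth_enabled="$(influxdb_conf_get "auth-enabled")"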
########################
# Create basic influxdb.conf file using the example provided in the etc/ folder
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_config() {
local config_file="${INFLUXD_CONFIG_PATH}"
if [[ -f "${config_file}" ]]; then
info "Custom configuration ${INFLUXDB_CONF_FILE} detected!"
warn "The 'INFLUXDB_' environment variables override the equivalent options in the configuration file."
warn "If a configuration option is not specified in either the configuration file or in an environment variable, InfluxDB uses its internal default configuration"
else
info "No injected configuration files found. Creating default config files..."
touch "${config_file}"
fi
}
########################
# Create primary setup
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_primary_setup() {
"${INFLUXDB_BIN_DIR}/influx" setup -f --name "${INFLUXDB_ADMIN_CONFIG_NAME}" \
--org "${INFLUXDB_ADMIN_ORG}" \
--bucket "${INFLUXDB_ADMIN_BUCKET}" \
--username "${INFLUXDB_ADMIN_USER}" \
--password "${INFLUXDB_ADMIN_USER_PASSWORD}" \
--token "${INFLUXDB_ADMIN_USER_TOKEN}" \
--retention "${INFLUXDB_ADMIN_RETENTION}"
}
########################
# Create organization
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_org() {
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" org create --name "${INFLUXDB_USER_ORG}"
}
########################
# Create bucket
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_bucket() {
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" bucket create \
"--org" "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" \
"--name" "${INFLUXDB_USER_BUCKET}"
}
########################
# Create user
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_user() {
local username=${1:?missing username}
local password=${2:?missing password}
local kind=${3:-"admin"}
local params=("--org" "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" "--name" "${username}" "--password" "${password}")
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" user create "${params[@]}"
if is_boolean_yes "${INFLUXDB_CREATE_USER_TOKEN}"; then
local read_grants=("--read-buckets" "--read-checks" "--read-dashboards" "--read-dbrp" "--read-notificationEndpoints" "--read-notificationRules" "--read-orgs" "--read-tasks")
local write_grants=("--write-buckets" "--write-checks" "--write-dashboards" "--write-dbrp" "--write-notificationEndpoints" "--write-notificationRules" "--write-orgs" "--write-tasks")
local -a grants
if [[ ${kind} = "admin" ]] || [[ ${kind} = "write" ]]; then
grants+=("${read_grants[@]}" "${write_grants[@]}")
elif [[ ${kind} = "read" ]]; then
grants+=("${read_grants[@]}")
else
echo "not supported user kind: ${kind}" && exit 1
fi
INFLUX_ACTIVE_CONFIG="${INFLUXDB_ADMIN_CONFIG_NAME}" "${INFLUXDB_BIN_DIR}/influx" auth create \
--user "${username}" \
--org "${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}" "${grants[@]}"
fi
}
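# Example (illustrative only): creating a read-only user with its own token,
# mirroring what influxdb_initialize does for INFLUXDB_READ_USER. The username
# and password literals are assumptions for demonstration.
#   INFLUXDB_CREATE_USER_TOKEN="yes"
#   influxdb_create_user "reporting" "s3cret-pass" "read"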
########################
# Start InfluxDB in background disabling authentication and waits until it's ready
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_start_bg_noauth() {
info "Starting InfluxDB in background..."
local start_command=("${INFLUXDB_BIN_DIR}/influxd")
# if root user then run it with gosu
am_i_root && start_command=("gosu" "$INFLUXDB_DAEMON_USER" "${start_command[@]}")
INFLUXDB_HTTP_HTTPS_ENABLED=false INFLUXDB_HTTP_BIND_ADDRESS="127.0.0.1:${INFLUXDB_HTTP_PORT_NUMBER}" debug_execute "${start_command[@]}" &
wait-for-port --timeout="$INFLUXDB_PORT_READINESS_TIMEOUT" "$INFLUXDB_HTTP_PORT_NUMBER"
wait-for-influxdb
}
########################
# Waits for InfluxDB to be ready
# Times out after INFLUXDB_HTTP_READINESS_TIMEOUT seconds (60 by default)
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
########################
wait-for-influxdb() {
curl -sSL -I "127.0.0.1:${INFLUXDB_HTTP_PORT_NUMBER}/ping?wait_for_leader=${INFLUXDB_HTTP_READINESS_TIMEOUT}s" >/dev/null 2>&1
}
########################
# Check if InfluxDB is running
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# Boolean
#########################
is_influxdb_running() {
if pgrep "influxd" >/dev/null 2>&1; then
true
else
false
fi
}
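# Example (illustrative only): the helper is meant to be used directly in
# conditionals, as influxdb_custom_init_scripts does below.
#   if ! is_influxdb_running; then
#       influxdb_start_bg_noauth
#   fi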
########################
# Stop InfluxDB
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_stop() {
info "Stopping InfluxDB..."
! is_influxdb_running && return
pkill --full --signal TERM "$INFLUXDB_BASE_DIR"
wait-for-port --state free "$INFLUXDB_PORT_NUMBER"
}
########################
# Execute an arbitrary query using InfluxDB CLI
# Globals:
# INFLUXDB_*
# Arguments:
# $1 - Query to execute
# $2 - Whether to use admin credentials to run the command or not
# Returns:
# None
#########################
influxdb_execute_query() {
local -r query="${1:?query is required}"
local authenticate="${2:-false}"
local flags=("-host" "127.0.0.1" "-port" "$INFLUXDB_HTTP_PORT_NUMBER")
is_boolean_yes "$authenticate" && flags+=("-username" "${INFLUXDB_ADMIN_USER}" "-password" "${INFLUXDB_ADMIN_USER_PASSWORD}")
debug_execute "${INFLUXDB_BIN_DIR}/influx" "${flags[@]}" "-execute" "$query"
}
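# Example (illustrative only): running an InfluxQL statement with admin
# credentials, mirroring influxdb_create_db below. The database name is an
# assumption for demonstration.
#   influxdb_execute_query "CREATE DATABASE metrics" "true"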
########################
# Creates the admin user
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_create_admin_user() {
debug "Creating admin user..."
influxdb_execute_query "CREATE USER \"${INFLUXDB_ADMIN_USER}\" WITH PASSWORD '${INFLUXDB_ADMIN_USER_PASSWORD}' WITH ALL PRIVILEGES"
}
########################
# Creates a database
# Globals:
# INFLUXDB_*
# Arguments:
# $1 - Database name
# Returns:
# None
#########################
influxdb_create_db() {
local -r db="${1:?db is required}"
debug "Creating database \"${db}\"..."
influxdb_execute_query "CREATE DATABASE ${db}" "true"
}
########################
# Grants a role to a user on a database
# Globals:
# INFLUXDB_*
# Arguments:
# $1 - User name
# $2 - Database name
# $3 - Role
# Returns:
# None
#########################
influxdb_grant() {
local -r user="${1:?user is required}"
local -r db="${2:?db is required}"
local -r role="${3:?role is required}"
debug "Granting \"${role}\" permissions to user ${user} on database \"${db}\"..."
influxdb_execute_query "GRANT ${role} ON \"${db}\" TO \"${user}\"" "true"
}
########################
# Gets the role for a user
# Arguments:
# $1 - user
# Returns:
# String
#########################
influxdb_user_role() {
local role
local -r user="${1:?user is required}"
role="${user//_/}"
role="${role%USER}"
role="${role#INFLUXDB}"
echo "${role:-ALL}"
}
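# Example (illustrative only): the mapping above derives the role from the
# variable name itself, e.g.:
#   influxdb_user_role "INFLUXDB_READ_USER"    # prints "READ"
#   influxdb_user_role "INFLUXDB_WRITE_USER"   # prints "WRITE"
#   influxdb_user_role "INFLUXDB_USER"         # prints "ALL"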
########################
# Ensure InfluxDB is initialized
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_initialize() {
influxdb_create_config
if [[ ! -f "${INFLUX_CONFIGS_PATH}" ]]; then
influxdb_start_bg_noauth
info "Deploying InfluxDB from scratch"
info "Creating primary setup..."
influxdb_create_primary_setup
if [[ -n "${INFLUXDB_USER_ORG}" ]] && [[ "${INFLUXDB_USER_ORG}" != "${INFLUXDB_ADMIN_ORG}" ]]; then
info "Creating custom org with id: ${INFLUXDB_USER_ORG}..."
influxdb_create_org
fi
if [[ -n "${INFLUXDB_USER_BUCKET}" ]]; then
info "Creating custom bucket with id: ${INFLUXDB_USER_BUCKET} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
influxdb_create_bucket
fi
if [[ -n "${INFLUXDB_USER}" ]]; then
info "Creating custom user with username: ${INFLUXDB_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
influxdb_create_user "${INFLUXDB_USER}" "${INFLUXDB_USER_PASSWORD}"
fi
if [[ -n "${INFLUXDB_READ_USER}" ]]; then
info "Creating custom user with username: ${INFLUXDB_READ_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
influxdb_create_user "${INFLUXDB_READ_USER}" "${INFLUXDB_READ_USER_PASSWORD}" "read"
fi
if [[ -n "${INFLUXDB_WRITE_USER}" ]]; then
info "Creating custom user with username: ${INFLUXDB_WRITE_USER} in org with id: ${INFLUXDB_USER_ORG:-${INFLUXDB_ADMIN_ORG}}..."
influxdb_create_user "${INFLUXDB_WRITE_USER}" "${INFLUXDB_WRITE_USER_PASSWORD}" "write"
fi
else
info "influx CLI configuration ${INFLUXDB_CONF_FILE} detected!"
info "Deploying InfluxDB with persisted data"
fi
}
########################
# Run custom initialization scripts
# Globals:
# INFLUXDB_*
# Arguments:
# None
# Returns:
# None
#########################
influxdb_custom_init_scripts() {
if [[ -n $(find "${INFLUXDB_INITSCRIPTS_DIR}/" -type f -regex ".*\.\(sh\|txt\)") ]] && [[ ! -f "${INFLUXDB_VOLUME_DIR}/.user_scripts_initialized" ]]; then
info "Loading user's custom files from ${INFLUXDB_INITSCRIPTS_DIR} ..."
local -r tmp_file="/tmp/filelist"
if ! is_influxdb_running; then
influxdb_start_bg_noauth
fi
find "${INFLUXDB_INITSCRIPTS_DIR}/" -type f -regex ".*\.\(sh\|txt\)" | sort >"$tmp_file"
while read -r f; do
case "$f" in
*.sh)
if [[ -x "$f" ]]; then
debug "Executing $f"
"$f"
else
debug "Sourcing $f"
. "$f"
fi
;;
*.txt)
debug "Executing $f"
influxdb_execute_query "$(<"$f")"
;;
*) debug "Ignoring $f" ;;
esac
done < "$tmp_file"
rm -f "$tmp_file"
touch "${INFLUXDB_VOLUME_DIR}/.user_scripts_initialized"
fi
}
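# Example (illustrative only): files dropped into INFLUXDB_INITSCRIPTS_DIR are
# picked up on first boot; *.sh files are executed (or sourced when not
# executable) and *.txt files are run as queries via influxdb_execute_query.
# The file names and statement below are assumptions for demonstration.
#   /docker-entrypoint-initdb.d/01-create-db.txt   ->  CREATE DATABASE example
#   /docker-entrypoint-initdb.d/02-extra-setup.sh  ->  arbitrary shell commands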