|
|
#!/bin/bash
## Load the kal-shlib core; on failure print install instructions and abort.
## Fix: the repository name was wrapped in UNESCAPED backticks inside double
## quotes, which made the shell execute `deb.kalysto.org` as a command
## substitution instead of printing it literally.
. /etc/shlib >/dev/null 2>&1 || {
    echo "Error: you don't have kal-shlib-core installed."
    echo ""
    echo " You might want to add \`deb.kalysto.org\` deb repository, you'll need root access,"
    echo " so you might want to run these command after a \`sudo -i\` for instance..."
    echo ""
    echo " echo deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main \\"
    echo " > /etc/apt/sources.list.d/kalysto.org.list"
    echo " wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add -"
    echo " apt-get update -o Dir::Etc::sourcelist=sources.list.d/kalysto.org.list \\"
    echo " -o Dir::Etc::sourceparts=- -o APT::Get::List-Cleanup=0"
    echo ""
    echo " Then install package kal-shlib-*:"
    echo ""
    echo " apt install kal-shlib-{common,cmdline,config,cache,docker,pretty}"
    echo ""
    exit 1
} >&2
## Load the kal-shlib modules this script relies on (err, Wrap, read-0,
## cmdline.spec.*, :cache: decorators, docker helpers, ...).
for __shlib_module in common parse cmdline config cache fn docker; do
    include "$__shlib_module"
done
unset __shlib_module
## When loaded via `source`, $0 differs from BASH_SOURCE[0]; remember that
## so the command-line processing at the bottom can be skipped.
if [[ "${BASH_SOURCE[0]}" != "${0}" ]]; then
    SOURCED=true
fi

## Metadata consumed by the cmdline framework.
version=0.1
desc='Install backup'
help=""
version_gt() {
    ## True when $1 is strictly greater (version-wise) than $2.
    ## Relies on GNU `sort -V`: if the smallest of the two is NOT $1,
    ## then $1 is the bigger one.
    local smallest
    smallest=$(printf '%s\n' "$@" | sort -V | head -n 1)
    [ "$smallest" != "$1" ]
}
## Read NUL/err-terminated fields into the named variables, the FIRST
## argument being the NAME of the variable receiving the final errorlevel
## (as printed by p-0a-err).  Uses `read -r -- "$name"` indirection to
## assign through variable names.  On protocol errors the errorlevel
## variable is forced to 127 (not enough values) or 126 (last value was
## not a number) and the function returns false.
read-0a-err() {
    local ret="$1" eof="" idx=0 last=
    ## Default errorlevel: "0" until proven otherwise.
    read -r -- "${ret?}" <<<"0"
    shift
    while [ "$1" ]; do
        last=$idx
        read -r -- "$1" || {
            ## Put this last value in ${!ret}
            eof="$1"
            read -r -- "$ret" <<<"${!eof}"
            break
        }
        ((idx++))
        shift
    done
    [ -z "$eof" ] || {
        if [ "$last" != 0 ]; then
            echo "Error: read-0a-err couldn't fill all value" >&2
            read -r -- "$ret" <<<"127"
        else
            if [ -z "${!ret}" ]; then
                echo "Error: last value is not a number, did you finish with an errorlevel ?" >&2
                read -r -- "$ret" <<<"126"
            fi
        fi
        false
    }
}
## Run the given command, then print its errorlevel (no trailing newline)
## so the stream can be consumed by read-0a-err on the other side.
p-0a-err() {
    "$@"
    echo -n "$?"
}
## List (one per line, deduplicated) the compose project labels of all
## currently running containers.  Result is memoized for the session by
## the :cache: decorator applied below.
docker:running-container-projects() {
    :cache: scope=session
    docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
}
decorator._mangle_fn docker:running-container-projects
## Generate a passphrase-less RSA keypair labeled "SERVICE@HOST" in a
## temporary directory (subshell-scoped) and print the PRIVATE key on
## stdout.  The public half is discarded with the tmpdir.
ssh:mk-private-key() {
    local host="$1" service_name="$2"
    (
        settmpdir VPS_TMPDIR
        ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
        cat "$VPS_TMPDIR/rsync_rsa"
    )
}
mailcow:has-images-running() {
    ## True when at least one running container uses a "mailcow/" image.
    local running_images
    running_images=$(docker ps --format '{{.Image}}' | sort | uniq)
    case $'\n'"$running_images" in
        *$'\n'"mailcow/"*) return 0 ;;
        *) return 1 ;;
    esac
}
## True when any running compose project name contains "mailcow".
## NOTE(review): the pattern is *mailcow* (substring anywhere), so the
## surrounding newline guards are redundant here — kept as-is.
mailcow:has-container-project-mentionning-mailcow() {
    local projects
    projects=$(docker:running-container-projects) || return 1
    [[ $'\n'"$projects"$'\n' == *mailcow* ]]
}
mailcow:has-running-containers() {
    ## Either signal is enough to conclude mailcow containers are running.
    if mailcow:has-images-running; then
        return 0
    fi
    mailcow:has-container-project-mentionning-mailcow
}
## Print the mailcow-dockerized installation directory, probing the
## standard locations (/opt, /opt/apps, /root).  A directory qualifies
## only if it contains a readable mailcow.conf.  Session-cached.
mailcow:get-root() {
    :cache: scope=session
    local dir
    for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
        [ -d "$dir" ] || continue
        [ -r "$dir/mailcow.conf" ] || continue
        echo "$dir"
        return 0
    done
    return 1
}
decorator._mangle_fn mailcow:get-root
## Locate the compose.yml used by compose-core by running `compose` in
## DEBUG/DRY_RUN mode and scraping the HOST_COMPOSE_YML_FILE env var from
## its debug output.  Fails if the reported path doesn't exist.
## Session-cached.
compose:get-compose-yml() {
    :cache: scope=session
    local path
    path=$(DEBUG=1 DRY_RUN=1 compose 2>&1 | egrep '^\s+-e HOST_COMPOSE_YML_FILE=' | cut -f 2- -d "=" | cut -f 1 -d " ")
    [ -e "$path" ] || return 1
    echo "$path"
}
decorator._mangle_fn compose:get-compose-yml
export -f compose:get-compose-yml
## True when a compose project named exactly "myc" has running containers.
compose:has-container-project-myc() {
    local projects
    projects=$(docker:running-container-projects) || return 1
    [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
}
## Change KEY (dotted yaml path) to VALUE in the project's compose.yml,
## delegating the careful edit to yaml:file:value-change.
compose:file:value-change() {
    local key="$1" value="$2"
    local compose_yml
    if ! compose_yml=$(compose:get-compose-yml); then
        err "Couldn't locate your 'compose.yml' file."
        return 1
    fi
    yaml:file:value-change "$compose_yml" "$key" "$value" || return 1
}
export -f compose:file:value-change
## Set KEY to VALUE in yaml FILE while refusing any edit that would touch
## more than exactly one line.  Strategy: run `yq` on a copy, split the
## resulting `diff -u0` output into NUL-separated hunks, keep only the
## hunk whose added line matches the final key component, then apply the
## filtered diff with patch(1).  Runs in a subshell cd'ed to the file's
## directory; hard-exits the caller on subshell failure.
yaml:file:value-change() {
    local file="$1" key="$2" value="$3" first=1 count=0 diff=""
    (
        cd "${file%/*}"
        while read-0 hunk; do
            if [ -n "$first" ]; then
                ## first chunk is the diff header (---/+++ lines)
                diff+="$hunk"
                first=
                continue
            fi
            if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
                ((count++))
                ## NOTE(review): the >&2 on this assignment is a no-op
                ## (redirection on a pure assignment) — kept as-is.
                diff+="$hunk" >&2
            else
                :
                # echo "discarding:" >&2
                # e "$hunk" | prefix " | " >&2
            fi
        done < <(
            export DEBUG=
            settmpdir YQ_TEMP
            ## sed inserts a blank line before each top-level key so that
            ## diff hunks can't span two sibling entries.
            cp "${file}" "$YQ_TEMP/compose.yml" &&
                yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
                sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
                diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
                    sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
            printf "\0"
        )
        if [[ "$count" == 0 ]]; then
            err "No change made to '$file'."
            return 1
        fi
        if [[ "$count" != 1 ]]; then
            err "compose file change request seems dubious and was refused:"
            e "$diff" | prefix " | " >&2
            return 1
        fi
        echo Applying: >&2
        e "$diff" | prefix " | " >&2
        patch <<<"$diff"
    ) || exit 1
}
export -f yaml:file:value-change
type:is-mailcow() {
    ## A mailcow installation is detected by its root directory, or —
    ## failing that — by mailcow containers already running.
    mailcow:get-root >/dev/null && return 0
    mailcow:has-running-containers
}
## A compose-core installation requires BOTH a locatable compose.yml AND
## a running "myc" compose project.
type:is-compose() {
    compose:get-compose-yml >/dev/null && compose:has-container-project-myc
}
## Print the installation type of this VPS ("mailcow", "compose", ...) by
## probing every function named "type:is-*" until one succeeds.
## Session-cached.
vps:get-type() {
    :cache: scope=session
    local fn
    for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
        "$fn" && {
            echo "${fn#type:is-}"
            return 0
        }
    done
    return 1
}
decorator._mangle_fn vps:get-type
## Print the list of backup source paths configured in
## /etc/mirror-dir/config.yml (key default.sources).  Session-cached.
mirror-dir:sources() {
    :cache: scope=session
    if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
        err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
        return 1
    fi
}
decorator._mangle_fn mirror-dir:sources
## Ensure path ELT is listed in mirror-dir's backup sources, appending it
## to /etc/mirror-dir/config.yml (via sed, right after the "sources:"
## line) when missing.
mirror-dir:check-add() {
    local elt="$1" sources
    sources=$(mirror-dir:sources) || return 1
    if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
        info "Volume $elt already in sources"
    else
        Elt "Adding directory $elt"
        sed -i "/sources:/a\ - \"${elt}\"" \
            /etc/mirror-dir/config.yml
        Feedback || return 1
    fi
}
mirror-dir:check-add-vol() {
    ## Register the data directory of the docker volume(s) matching ELT
    ## (glob on the compose-prefixed volume name) as a backup source.
    local volume_name="$1"
    mirror-dir:check-add "/var/lib/docker/volumes/*_${volume_name}-*/_data"
}
## The first colon is to prevent auto-export of function from shlib.
## Probe for a bash bug where process substitution passed as a file
## argument can't be read after a prior `< <(...)` redirection; when the
## probe trips, BASH_BUG_5 is exported so wrap() can work around it.
## Fix: the cleanup used `unset -f bash_bug_5` (underscores), which never
## matched the hyphenated function name and left the probe defined.
: ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null && export BASH_BUG_5=1 && unset -f bash-bug-5
## Run CODE (a shell snippet) under the pretty-printing Wrap helper with
## LABEL as title.  When BASH_BUG_5 is set, the snippet is materialized
## as a real temp file instead of a process substitution (see probe above).
wrap() {
    local label="$1" code="$2"
    shift 2
    export VERBOSE=1
    interpreter=/bin/bash
    if [ -n "$BASH_BUG_5" ]; then
        (
            settmpdir tmpdir
            fname=${label##*/}
            e "$code" > "$tmpdir/$fname" &&
                chmod +x "$tmpdir/$fname" &&
                Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
        )
    else
        Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
    fi
}
## Verify that HOST's first IPv4 address (via getent) matches this
## machine's public IP (via myip.kal.fr).  With $ignore_ping_check set
## (global, set by callers), a mismatch only warns.
## Fix: `ip` and `my_ip` leaked as globals; they are now local.
ping_check() {
    #global ignore_ping_check
    local host="$1"
    local ip my_ip

    ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" | head -n 1 | cut -f 1 -d " ") || return 1
    my_ip=$(curl -s myip.kal.fr)
    if [ "$ip" != "$my_ip" ]; then
        if [ -n "$ignore_ping_check" ]; then
            warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
        else
            err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
            return 1
        fi
    fi
}
## Install the rsync-backup + mysql-dump machinery on a mailcow host and
## register all mailcow volumes/dirs as mirror-dir sources.
## $1: BACKUP_SERVER (host[:port][/path]), $2: ignore_ping_check flag.
## Fix: the volume list had "vmail{,-attachments-vol}" QUOTED, which
## disables brace expansion — a literal brace string was registered
## instead of the two volumes vmail and vmail-attachments-vol.
mailcow:install-backup() {
    local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN

    ## find installation
    mailcow_root=$(mailcow:get-root) || {
        err "Couldn't find a valid mailcow root directory."
        return 1
    }

    ## check ok
    DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
        err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
        return 1
    }

    ping_check "$DOMAIN" || return 1

    MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
        err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
        return 1
    }

    ## Container naming scheme differs between `docker compose` (v2,
    ## dashes) and `docker-compose` (v1, underscores).
    if docker compose >/dev/null 2>&1; then
        MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
    else
        MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
    fi
    container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
    if [ -z "$container_id" ]; then
        err "Couldn't find docker container named '$MYSQL_CONTAINER'."
        return 1
    fi

    ## Exported for the charm install hooks run through wrap() below.
    export KEY_BACKUP_ID="mailcow"
    export MYSQL_ROOT_PASSWORD
    export MYSQL_CONTAINER
    export BACKUP_SERVER
    export DOMAIN

    wrap "Install rsync-backup on host" "
        cd /srv/charm-store/rsync-backup
        bash ./hooks/install.d/60-install.sh
    " || return 1

    wrap "Mysql dump install" "
        cd /srv/charm-store/mariadb
        bash ./hooks/install.d/60-backup.sh
    " || return 1

    ## Volume list mirrors mailcow's own helper:
    ## https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
    for elt in vmail{,-attachments-vol} crypt redis rspamd postfix; do
        mirror-dir:check-add-vol "$elt" || return 1
    done

    mirror-dir:check-add "$mailcow_root" || return 1
    mirror-dir:check-add "/var/backups/mysql" || return 1
    mirror-dir:check-add "/etc" || return 1

    ## Split BACKUP_SERVER into host and optional ssh port.
    dest="$BACKUP_SERVER"
    dest="${dest%/*}"
    ssh_options=()
    if [[ "$dest" == *":"* ]]; then
        port="${dest##*:}"
        dest="${dest%%:*}"
        ssh_options=(-p "$port")
    else
        port=""
        dest="${dest%%:*}"
    fi

    info "You can run this following command from an host having admin access to $dest:"
    echo " (Or send it to a backup admin of $dest)" >&2
    echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
}
## True when HOST appears as a domain in COMPOSE_FILE: either as a
## dotted top-level service key, or as the `domain` / `server-aliases`
## option of a web-proxy or publish-dir relation.
compose:has_domain() {
    local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases

    while read-0 name conf; do
        name=$(e "$name" | shyaml get-value)
        ## Top-level keys looking like FQDNs are themselves domains.
        if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
            [ "$host" == "$name" ] && return 0
        fi
        rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
        for relation in web-proxy publish-dir; do
            relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
            while read-0 label conf_relation; do
                domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
                    [ "$host" == "$domain" ] && return 0
                }
                server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
                    [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
                }
            done < <(e "$relation_value" | shyaml -y key-values-0)
        done
    done < <(shyaml -y key-values-0 < "$compose_file")
    return 1
}
## Configure the rsync-backup service in a compose-core installation:
## sanity-checks (domain present in compose file, IP matches), cleans up
## legacy /root/.ssh/rsync_rsa keys, then either validates an existing
## service entry in the compose file or appends a new one with a freshly
## generated private key.  Finally prints the `ssh-key add` command to
## authorize this host on the backup server.
compose:install-backup() {
    local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"

    ## XXXvlab: far from perfect as it mimics and depends internal
    ## logic of current default way to get a domain in compose-core
    host=$(hostname)

    if ! compose:has_domain "$compose_file" "$host"; then
        if [ -n "$ignore_domain_check" ]; then
            warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
        else
            err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
            return 1
        fi
    fi

    ping_check "$host" || return 1

    if [ -e "/root/.ssh/rsync_rsa" ]; then
        warn "deleting private key in /root/.ssh/rsync_rsa, has we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa
    fi
    if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
        warn "deleting public key in /root/.ssh/rsync_rsa.pub, has we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa.pub
    fi

    if service_cfg=$(cat "$compose_file" | shyaml get-value -y "$service_name" 2>/dev/null); then
        info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
             "is already present in '$compose_file'."
        cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
            err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
                "entry in '$compose_file'."
            return 1
        }
        private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
        target=$(e "$cfg" | shyaml get-value target) || return 1
        if [ "$target" != "$BACKUP_SERVER" ]; then
            err "Existing backup target '$target' is different" \
                "from specified '$BACKUP_SERVER'"
            return 1
        fi
    else
        private_key=$(ssh:mk-private-key "$host" "$service_name")

        ## NOTE(review): YAML indentation below was reconstructed (the
        ## original newlines were lost); the private key block must be
        ## indented deeper than the "private-key:" key — verify against
        ## a generated compose.yml.
        cat <<EOF >> "$compose_file"

$service_name:
  options:
    ident: $host
    target: $BACKUP_SERVER
    private-key: |
$(e "$private_key" | sed -r 's/^/      /g')
EOF
    fi

    ## Split BACKUP_SERVER into host and optional ssh port.
    dest="$BACKUP_SERVER"
    dest="${dest%/*}"
    ssh_options=()
    if [[ "$dest" == *":"* ]]; then
        port="${dest##*:}"
        dest="${dest%%:*}"
        ssh_options=(-p "$port")
    else
        port=""
        dest="${dest%%:*}"
    fi

    info "You can run this following command from an host having admin access to $dest:"
    echo " (Or send it to a backup admin of $dest)" >&2
    ## We remove ending label (label will be added or not in the
    ## private key, and thus here, depending on the version of
    ## openssh-client)
    public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
    echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
}
backup-action() {
    ## Dispatch ACTION to the "<vps-type>:<action>" implementation for
    ## the detected installation type, forwarding remaining arguments.
    local action="$1"
    shift
    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "${vps_type}:${action}"; then
        err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
        return 1
    fi
    "${vps_type}:${action}" "$@"
}
## Print "TARGET IDENT" from the rsync-backup service entry (or
## SERVICE_NAME if given) of the project's compose.yml.
## Fixes: the `options.target` check was duplicated verbatim; the
## target/ident error paths emitted err but kept going and echoed an
## incomplete result — they now return 1; `ident` is now local.
compose:get_default_backup_host_ident() {
    local service_name="$1"  ## Optional
    local compose_file service_cfg cfg target ident

    compose_file=$(compose:get-compose-yml)
    service_name="${service_name:-rsync-backup}"
    if ! service_cfg=$(cat "$compose_file" | shyaml get-value -y "$service_name" 2>/dev/null); then
        err "No service named '$service_name' found in 'compose.yml'."
        return 1
    fi
    cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
        err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
            "entry in '$compose_file'."
        return 1
    }
    if ! target=$(e "$cfg" | shyaml get-value target); then
        err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
            "entry in '$compose_file'."
        return 1
    fi
    if ! ident=$(e "$cfg" | shyaml get-value ident); then
        err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
            "entry in '$compose_file'."
        return 1
    fi
    echo "$target $ident"
}
## Parse /etc/cron.d/mirror-dir to recover the configured backup
## destination (value of "-d") and host identifier (value of "-h") of
## the installed 'mirror-dir backup' cron job; prints "DEST IDENT".
## Fix: the empty-ident error message said "Can't find destination"
## (copy-paste from the dest branch); it now says "identifier".
mailcow:get_default_backup_host_ident() {
    local content cron_line ident found dest cmd_line arg
    if ! [ -e "/etc/cron.d/mirror-dir" ]; then
        err "No '/etc/cron.d/mirror-dir' found."
        return 1
    fi
    content=$(cat /etc/cron.d/mirror-dir) || {
        err "Can't read '/etc/cron.d/mirror-dir'."
        return 1
    }
    if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
        err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
        return 1
    fi

    cron_line=${cron_line%|*}       ## drop the trailing "| ..." logger part
    cmd_line=(${cron_line#*root})   ## word-split the args after the user field

    ## looking for dest: the argument following "-d"
    found=
    dest=
    for arg in "${cmd_line[@]}"; do
        [ -n "$found" ] && {
            dest="$arg"
            break
        }
        [ "$arg" == "-d" ] && {
            found=1
        }
    done
    if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
        err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
        return 1
    fi
    if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
        ## unquoting, the eval should be safe because of previous check
        dest=$(eval e "$dest")
    fi
    if [ -z "$dest" ]; then
        err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
        return 1
    fi

    ## looking for ident: the argument following "-h"
    found=
    ident=
    for arg in "${cmd_line[@]}"; do
        [ -n "$found" ] && {
            ident="$arg"
            break
        }
        [ "$arg" == "-h" ] && {
            found=1
        }
    done
    if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
        err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
        return 1
    fi
    if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
        ## unquoting, the eval should be safe because of previous check
        ident=$(eval e "$ident")
    fi
    if [ -z "$ident" ]; then
        err "Can't find identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
        return 1
    fi

    echo "$dest $ident"
}
compose:service:containers() {
    ## Print the IDs of running containers implementing SERVICE in
    ## compose project PROJECT (master-service label match).
    local project="$1" service="$2"
    local filters=(
        --filter label="com.docker.compose.project=$project"
        --filter label="compose.master-service=$service"
    )
    docker ps "${filters[@]}" --format="{{.ID}}"
}
export -f compose:service:containers
## Print the ID of THE running container of SERVICE; errors out if there
## are zero or more than one.
compose:service:container_one() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        if read-0a _; then
            err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_one
## Print the ID of the first running container of SERVICE; errors out if
## there is none, only warns when there are several.
compose:service:container_first() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        if read-0a _; then
            warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_first
## List the IDs of all running containers.  Session-cached.
docker:running_containers() {
    :cache: scope=session
    docker ps --format="{{.ID}}"
}
decorator._mangle_fn docker:running_containers
export -f docker:running_containers
compose:project:containers() {
    ## Print the IDs of all running containers belonging to PROJECT.
    local project="$1"
    local ps_args=(
        --filter label="com.docker.compose.project=$project"
        --format="{{.ID}}"
    )
    docker ps "${ps_args[@]}"
}
export -f compose:project:containers
compose:charm:containers() {
    ## Print the IDs of running containers of PROJECT built from CHARM.
    local project="$1" charm="$2"
    local filters=(
        --filter label="com.docker.compose.project=$project"
        --filter label="compose.charm=$charm"
    )
    docker ps "${filters[@]}" --format="{{.ID}}"
}
export -f compose:charm:containers
## Print the ID of THE running container built from CHARM in PROJECT;
## errors out on zero or multiple matches.
compose:charm:container_one() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
            return 1
        }
        if read-0a _; then
            err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_one
## Print the ID of the first running container built from CHARM in
## PROJECT; only warns (no failure) on zero or multiple matches.
compose:charm:container_first() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
        }
        if read-0a _; then
            warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_first
## Print the public URL of SERVICE in PROJECT_NAME: read it from the
## web-proxy relation data when one exists, otherwise fall back to
## "http://CONTAINER_IP[:FIRST_TCP_PORT]".
compose:get_url() {
    local project_name="$1" service="$2" data_file network ip
    data_dir=("/var/lib/compose/relations/${project_name}/${service}-"*"/web-proxy")
    if [ "${#data_dir[@]}" -gt 1 ]; then
        err "More than one web-proxy relation." \
            "Current 'vps' algorithm is insufficient" \
            "to figure out which relation is concerned"
        return 1
    fi
    data_file="${data_dir[0]}/data"
    if [ -d "${data_file%/*}" ]; then
        (
            set -o pipefail
            ## users can't cat directly the content
            docker run --rm \
                -v "${data_file%/*}":/tmp/dummy alpine \
                cat "/tmp/dummy/${data_file##*/}" | shyaml get-value url
        )
    else
        ## Assume there are no frontend relation here, the url is direct IP
        container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
        network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
        IFS=":" read -r network ip <<<"$network_ip"
        tcp_port=
        for port in $(docker:exposed_ports "$container_id"); do
            IFS="/" read port type <<<"$port"
            [ "$type" == "tcp" ] || continue
            tcp_port="$port"
            break
        done
        echo -n "http://$ip"
        [ -n "$tcp_port" ] && echo ":$tcp_port"
    fi || {
        err "Failed querying ${service} to frontend relation to get url."
        return 1
    }
}
export -f compose:get_url
compose:container:service() {
    ## Print the compose service name of CONTAINER (from its
    ## "compose.service" label); fails on lookup error or empty label.
    local container="$1" service
    service=$(docker:container:label "$container" "compose.service") || {
        err "Failed to get service name from container ${container}."
        return 1
    }
    [ -n "$service" ] || {
        err "No service found for container ${container}."
        return 1
    }
    echo "$service"
}
export -f compose:container:service
## Run psql (reading SQL from stdin) inside PROJECT's postgres-charm
## container against DBNAME; extra args are forwarded to psql.
compose:psql() {
    local project_name="$1" dbname="$2" container_id
    shift 2
    container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
    docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
}
export -f compose:psql
## Run the mongo shell (reading commands from stdin) inside PROJECT's
## mongo-charm container against DBNAME.
compose:mongo() {
    local project_name="$1" dbname="$2" container_id
    container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
    docker exec -i "${container_id}" mongo --quiet "$dbname"
}
export -f compose:mongo
## Run the `pgm` tool in a throw-away container built from the postgres
## charm's image, wired to the running postgres container (same docker
## network, PGHOST set to its IP, pgpass mounted).  Callers can extend
## the docker run options through the pgm_docker_run_opts array.
compose:pgm() {
    local project_name="$1" container_network_ip container_ip container_network
    shift

    container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
    service_name=$(compose:container:service "$container_id") || return 1
    image_id=$(docker:container:image "$container_id") || return 1
    container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
    IFS=":" read -r container_network container_ip <<<"$container_network_ip"

    pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"

    local final_pgm_docker_run_opts+=(
        -u 0
        -e prefix_pg_local_command=" "
        --network "${container_network}"
        -e PGHOST="$container_ip"
        -e PGUSER=postgres
        -v "$pgpass:/root/.pgpass"
        "${pgm_docker_run_opts[@]}"
    )

    ## The command line is echoed before execution (visible trace).
    cmd=(docker run --rm \
        "${final_pgm_docker_run_opts[@]}" \
        "${image_id}" pgm "$@")
    echo "${cmd[@]}"
    "${cmd[@]}"
}
export -f compose:pgm
## Dump database SRC of PROJECT to local file DST, going through a
## temporary directory shared with the pgm container.
postgres:dump() {
    local project_name="$1" src="$2" dst="$3"
    (
        settmpdir PGM_TMP_LOCATION
        pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
        compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
            mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
    ) || return 1
}
export -f postgres:dump
## Restore local dump SRC into database DST of PROJECT by bind-mounting
## the dump read-only into the pgm container.
## NOTE(review): `exit 1` (not `return 1`) aborts the whole caller shell
## when SRC can't be resolved — verify this hard-exit is intended.
postgres:restore() {
    local project_name="$1" src="$2" dst="$3"
    full_src_path=$(readlink -e "$src") || exit 1
    (
        pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
        compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
    ) || return 1
}
export -f postgres:restore
odoo:get_public_user_id() {
    ## Print the database id of odoo's 'public_user' record (res.users)
    ## from the ir_model_data registry of DBNAME.
    local project_name="$1" dbname="$2"
    local query="select res_id from ir_model_data where model = 'res.users' and name = 'public_user';"
    compose:psql "$project_name" "$dbname" -qAt <<<"$query"
}
## Point the cyclos instance of DBNAME at URL by rewriting its
## configurations table.
## NOTE(review): URL is interpolated straight into the SQL statement —
## callers must pass trusted values (no quoting/escaping done here).
cyclos:set_root_url() {
    local project_name="$1" dbname="$2" url="$3"
    echo "UPDATE configurations SET root_url = '$url';" |
        compose:psql "$project_name" "$dbname" || {
        err "Failed to set cyclos url value in '$dbname' database."
        return 1
    }
}
export -f cyclos:set_root_url
cyclos:unlock() {
    ## Clear cyclos' startup lock table so the application can boot
    ## again (e.g. after restoring a database copy).
    local project_name="$1" dbname="$2"
    compose:psql "${project_name}" "${dbname}" <<<"delete from database_lock;"
}
export -f cyclos:unlock
rocketchat:drop-indexes() {
    ## Drop all mongo indexes of the users collection in DBNAME
    ## (rocketchat rebuilds them on startup).
    local project_name="$1" dbname="$2"
    compose:mongo "${project_name}" "${dbname}" <<<"db.users.dropIndexes()"
}
export -f rocketchat:drop-indexes
## Print (and cache in $PROJECT_NAME, exported) the current compose
## project name as reported by `compose --get-project-name`.
## Fix: replaced the deprecated/ambiguous `[ ... -o ... ]` test (which
## also used the non-POSIX `==` inside `[`) with a `[[ ]]` compound.
compose:project_name() {
    if [ -z "$PROJECT_NAME" ]; then
        PROJECT_NAME=$(compose --get-project-name) || {
            err "Couldn't get project name."
            return 1
        }
        if [[ -z "$PROJECT_NAME" || "$PROJECT_NAME" == "orphan" ]]; then
            err "Couldn't get project name, probably because 'compose.yml' wasn't found."
            echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
            echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
            return 1
        fi
        export PROJECT_NAME
    fi
    echo "$PROJECT_NAME"
}
export -f compose:project_name
## Extract the `docker run ...` backup command from the rsync-backup
## crontab inside the project's cron container, stripping the schedule,
## user field, output redirections and trailing pipe.  The command is
## expected to be the LAST (quoted) argument of the cron line.
compose:get_cron_docker_cmd() {
    local cron_line cmd_line docker_cmd
    project_name=$(compose:project_name) || return 1

    if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
        err "Can't find cron_line in cron container."
        echo " Have you forgotten to run 'compose up' ?" >&2
        return 1
    fi

    cron_line=${cron_line%|*}
    cron_line=${cron_line%"2>&1"*}
    cmd_line="${cron_line#*root}"

    ## Re-parse shell quoting of the remaining command line into an array.
    eval "args=($cmd_line)"

    ## should be last argument
    docker_cmd=$(echo ${args[@]: -1})
    if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
        echo "docker command found should start with 'docker run'." >&2
        echo "Here's command:" >&2
        echo " $docker_cmd" >&2
        return 1
    fi
    e "$docker_cmd"
}
## Pull back SRC (path under /var/mirror/IDENT on BACKUP_HOST) into
## local directory DST, running rsync inside the project's rsync-backup
## image so its ssh identity (/var/lib/rsync/.ssh/id_rsa) is used.
## BACKUP_HOST may carry a ":port" suffix.  $DRY_RUN adds rsync -n.
compose:recover-target() {
    local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
    project_name=$(compose:project_name) || return 1

    docker_image="${project_name}_${service_name}"
    if ! docker_has_image "$docker_image"; then
        compose build "${service_name}" || {
            err "Couldn't find nor build image for service '$service_name'."
            return 1
        }
    fi

    dst="${dst%/}"  ## remove final slash

    ssh_options=(-o StrictHostKeyChecking=no)
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi

    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    ## Mount DST's parent so rsync can create/replace its last component.
    cmd=(
        docker run --rm --entrypoint rsync \
            -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
            -v "${dst%/*}":/mnt/dest \
            "$docker_image" \
            "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
## Mailcow flavour of recover-target: same rsync pull as the compose
## version but run directly on the host (rsync installed locally, keys
## in /var/lib/rsync/.ssh).  BACKUP_HOST may carry a ":port" suffix.
mailcow:recover-target() {
    local backup_host="$1" ident="$2" src="$3" dst="$4"

    dst="${dst%/}"  ## remove final slash

    ssh_options=(-o StrictHostKeyChecking=no)
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi

    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    cmd=(
        rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
## Print the installed nextcloud version, scraped from the
## $OC_VersionString assignment of the instance's version.php.
## Relies on the global $nextcloud_service for the datastore path.
## NOTE(review): failures `exit 1` (abort whole shell) rather than
## `return 1` — verify this is intended by callers.
nextcloud:src:version() {
    local version
    if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
        err "Can't find version.php file to get last version installed."
        exit 1
    fi
    version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
    if [ -z "$version" ]; then
        err "Can't figure out version from version.php content."
        exit 1
    fi
    echo "$version"
}
## Detect a container whose dockerd state is wedged: `docker inspect`
## timing out (timeout exit code 124) is the symptom.  Remedy: kill the
## containerd shim process of that container and restart the container.
## Returns the inspect errlvl (0 when healthy).
container:health:check-fix:container-aliveness() {
    local container_id="$1"

    timeout 5s docker inspect "$container_id" >/dev/null 2>&1
    errlvl=$?
    if [ "$errlvl" == 124 ]; then
        service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
        container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
        ## Find the docker-containerd-shim process carrying this
        ## container id on its command line.
        pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim | grep "/$container_id" | sed -r 's/^ *//g' | cut -f 1 -d " ")
        if [ -z "$pid" ]; then
            err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command. Can't find its PID neither."
            return 1
        fi
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command (pid: $pid)."
        Wrap -d "kill pid $pid and restart" <<EOF
kill "$pid"
sleep 2
docker restart "$container_id"
EOF
    fi
    return $errlvl
}
## Detect the docker "unable to find user: no matching entries in passwd
## file" bug (docker exec fails with 126); remedy is a double restart of
## the container.  Unknown exec failures are reported but left alone.
## Returns the exec errlvl (0 when healthy).
container:health:check-fix:no-matching-entries() {
    local container_id="$1"

    out=$(docker exec "$container_id" echo 2>&1)
    errlvl=$?
    [ "$errlvl" == 0 ] && return 0
    service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
    container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
    if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
        Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
docker restart "$container_id"
sleep 2
docker restart "$container_id"
EOF
        return $errlvl
    fi
    warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
    echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
    echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
    echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
    return $errlvl
}
docker:api() {
    ## Query the local docker daemon REST API (unix socket) at ENDPOINT.
    local endpoint="$1"
    local socket="/var/run/docker.sock"
    curl -sS --unix-socket "$socket" "http://localhost${endpoint}"
}
docker:containers:id() {
    ## IDs of running containers, straight from the API.
    docker:api /containers/json | jq -r '.[] | .Id'
}

docker:containers:names() {
    ## Primary name of each running container, without the leading "/".
    docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
}
## Fetch a one-shot (non-streaming) stats JSON document for CONTAINER.
## Fix: `container` was assigned without `local` and leaked as a global.
docker:container:stats() {
    local container="$1"
    docker:api "/containers/$container/stats?stream=false"
}
## Aggregate one-shot stats for the given containers, grouped per
## "project/service" (containers of the same service are summed
## field-wise).  Output: one line per group:
##   PROJECT/SERVICE TS MEM_USAGE INACTIVE_FILE MEM_EFFECTIVE MEM_LIMIT
##   RX_BYTES RX_PACKETS RX_ERRORS RX_DROPPED TX_BYTES TX_PACKETS
##   TX_ERRORS TX_DROPPED
## Containers with no compose service label are grouped under "@" with
## their image's first RepoTag as service name.  Stats are fetched in
## parallel (one background job per container).  Session-cached.
docker:containers:stats() {
    :cache: scope=session

    local jobs='' line container id_names sha names name data service project
    local DC="com.docker.compose"
    local PSF_values=(
        ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
    )
    local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
    id_names=$(docker ps -a --format="$PSF") || return 1

    ## Create a docker container table from name/sha to service, project, image_name
    declare -A resolve
    while read-0a line; do
        sha=${line%% *}; line=${line#* }
        names=${line%% *}; line=${line#* }
        names=(${names//,/ })
        for name in "${names[@]}"; do
            resolve["$name"]="$line"
        done
        resolve["$sha"]="$line"
    done < <(printf "%s\n" "$id_names")

    declare -A data
    while read-0a line; do
        name=${line%% *}; line=${line#* }
        ts=${line%% *}; line=${line#* }

        resolved="${resolve["$name"]}"
        project=${resolved%% *}; resolved=${resolved#* }
        service=${resolved%% *}; resolved=${resolved#* }
        image_name="$resolved"
        if [ -z "$service" ]; then
            project="@"
            service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
            service=${service//\//_}
        fi
        if [ -n "${data["$project/$service"]}" ]; then
            ## Same project/service already seen: sum numeric columns
            ## (dropping the previous timestamp, keeping the newest).
            previous=(${data["$project/$service"]})
            previous=(${previous[@]:1})
            current=($line)
            sum=()
            i=0; max=${#previous[@]}
            while (( i < max )); do
                sum+=($((${previous[$i]} + ${current[$i]})))
                ((i++))
            done
            data["$project/$service"]="$ts ${sum[*]}"
        else
            data["$project/$service"]="$ts $line"
        fi
    done < <(
        for container in "$@"; do
            (
                docker:container:stats "${container}" |
                    jq -r '
                        (.name | ltrimstr("/")) + " " +
                        (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring) + " " +
                        (.memory_stats.usage | tostring) + " " +
                        (.memory_stats.stats.inactive_file | tostring) + " " +
                        ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring) + " " +
                        (.memory_stats.limit | tostring) + " " +
                        (.networks.eth0.rx_bytes | tostring) + " " +
                        (.networks.eth0.rx_packets | tostring) + " " +
                        (.networks.eth0.rx_errors | tostring) + " " +
                        (.networks.eth0.rx_dropped | tostring) + " " +
                        (.networks.eth0.tx_bytes | tostring) + " " +
                        (.networks.eth0.tx_packets | tostring) + " " +
                        (.networks.eth0.tx_errors | tostring) + " " +
                        (.networks.eth0.tx_dropped | tostring)
                    '
            ) &
            jobs=1
        done
        [ -n "$jobs" ] && wait
    )
    for label in "${!data[@]}"; do
        echo "$label ${data[$label]}"
    done
}
decorator._mangle_fn docker:containers:stats
export -f docker:containers:stats
col:normalize:size() {
    ## Filter: pad every whitespace-separated column of stdin to the
    ## widest cell seen in that column.  ALIGNMENT is one char per
    ## column: "+" right-aligns it, anything else left-aligns.
    local alignment=$1

    awk -v alignment="$alignment" '{
        # Buffer all lines: the padding widths need a first full pass.
        buf[NR] = $0;
        split($0, cols);

        # Track the widest cell of each column.
        for (c = 1; c <= length(cols); c++) {
            if (length(cols[c]) > width[c]) {
                width[c] = length(cols[c]);
            }
        }
    }
    END {
        # Second pass: reprint each buffered line, cell by cell.
        for (r = 1; r <= NR; r++) {
            split(buf[r], cols);

            out = "";
            for (c = 1; c <= length(cols); c++) {
                dir = substr(alignment, c, 1);
                if (dir != "+") {
                    dir = "-";  # default to left alignment unless "+"
                }
                out = out sprintf("%" dir width[c] "s ", cols[c]);
            }
            print out;
        }
    }'
}
## Create RRD database $VAR_DIR/rrd/PREFIX/LABEL.rrd with one data
## source per remaining "name:type:min:max[:rra_types]" argument and a
## fixed set of AVERAGE/MIN/MAX archives.  Installs rrdtool via apt on
## the fly if missing.  Fails if the file already exists.
rrd:create() {
    local prefix="$1"
    shift
    local label="$1" step="300" src_def
    shift
    if [ -z "$VAR_DIR" ]; then
        err "Unset \$VAR_DIR, can't create rrd graph"
        return 1
    fi
    mkdir -p "$VAR_DIR"
    if ! [ -d "$VAR_DIR" ]; then
        err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
        return 1
    fi
    if ! type -p rrdtool >/dev/null 2>&1; then
        apt-get install rrdtool -y --force-yes </dev/null
        ## NOTE(review): "2>/dev/null 2>&1" duplicates the stderr
        ## redirect (stdout stays visible) — kept as-is.
        if ! type -p rrdtool 2>/dev/null 2>&1; then
            err "Couldn't find nor install 'rrdtool'."
            return 1
        fi
    fi

    local RRD_PATH="$VAR_DIR/rrd"
    local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
    mkdir -p "${RRD_FILE%/*}"
    if [ -f "$RRD_FILE" ]; then
        err "File '$RRD_FILE' already exists, use a different label."
        return 1
    fi

    local rrd_ds_opts=()
    for src_def in "$@"; do
        IFS=":" read -r name type min max rra_types <<<"$src_def"
        rra_types=${rra_types:-average,max,min}
        ## 900s heartbeat for every data source.
        rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
    done

    local step=120
    local times=(
        ## with steps 120 is 2mn datapoint
        2m:1w
        6m:3w
        30m:12w
        3h:1y
        1d:10y
        1w:2080w
    )
    rrd_rra_opts=()
    for time in "${times[@]}"; do
        ## Brace expansion yields one RRA per consolidation function.
        rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
    done
    cmd=(
        rrdtool create "$RRD_FILE" \
            --step "$step" \
            "${rrd_ds_opts[@]}" \
            "${rrd_rra_opts[@]}"
    )

    "${cmd[@]}" || {
        err "Failed command: ${cmd[@]}"
        return 1
    }
}
## Feed data lines ("LABEL TS V1 V2 ...", read from stdin) into RRD
## files under $VAR_DIR/rrd/PREFIX/LABEL/NAME.rrd.  Each remaining
## argument maps one rrd file: "name|col:create_def[,col:create_def...]"
## selects which input columns go into that file, creating it on first
## use with the given DS definitions.
rrd:update() {
    local prefix="$1"
    shift
    while read-0a data; do
        [ -z "$data" ] && continue
        ## Split the space-separated record on "~" (spaces substituted).
        IFS="~" read -ra data <<<"${data// /\~}"
        label="${data[0]}"
        ts="${data[1]}"
        for arg in "$@"; do
            IFS="|" read -r name arg <<<"$arg"
            rrd_label="${label}/${name}"
            rrd_create_opt=()
            rrd_update_opt="$ts"
            for col_def in ${arg//,/ }; do
                col=${col_def%%:*}; create_def=${col_def#*:}
                rrd_update_opt="${rrd_update_opt}:${data[$col]}"
                rrd_create_opt+=("$create_def")
            done
            local RRD_ROOT_PATH="$VAR_DIR/rrd"
            local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
            local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
            if ! [ -f "$RRD_FILE" ]; then
                info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
                if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null; then
                    err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
                    return 1
                fi
            fi
            rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
                err "update failed with options: '$rrd_update_opt'"
                return 1
            }
        done
    done
}
## When this file is sourced (see SOURCED set near the top of the
## file), stop here: only the library functions above are wanted.
[ "$SOURCED" ] && return 0

##
## Command line processing
##

cmdline.spec.gnu
cmdline.spec.reporting

cmdline.spec.gnu install
## 'vps install' — group command only, subcommands do the actual work.
cmdline.spec::cmd:install:run() {
    :
}

cmdline.spec.gnu get-type
## 'vps get-type' — print the detected installation type.
cmdline.spec::cmd:get-type:run() {
    vps:get-type
}
## 'vps install backup BACKUP_SERVER' — dispatch backup installation
## to the handler matching the detected vps type
## ("${vps_type}-backup" subcommand), forwarding relevant flags.
cmdline.spec:install:cmd:backup:run() {

    : :posarg: BACKUP_SERVER 'Target backup server'

    : :optfla: --ignore-domain-check \
        "Allow to bypass the domain check in compose file (only used in compose installation)."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."

    local vps_type

    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    ## Only dispatch when an install-backup implementation exists for
    ## this type.
    if ! fn.exists "${vps_type}:install-backup"; then
        err "type '${vps_type}' has no backup installation implemented yet."
        return 1
    fi

    ## Forward only the flags meaningful for the target type.
    opts=()
    [ "$opt_ignore_ping_check" ] && opts+=("--ignore-ping-check")
    if [ "$vps_type" == "compose" ]; then
        [ "$opt_ignore_domain_check" ] && opts+=("--ignore-domain-check")
    fi

    "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
}
## Default compose service expected to hold the backup key.
DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
cmdline.spec.gnu compose-backup
## 'vps install compose-backup BACKUP_SERVER' — install backup on a
## compose-based host by delegating to compose:install-backup.
cmdline.spec:install:cmd:compose-backup:run() {

    : :posarg: BACKUP_SERVER 'Target backup server'

    : :optval: --service-name,-s "YAML service name in compose file to check for existence of key. Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
    : :optval: --compose-file,-f "Compose file location. Defaults to the value of '\$DEFAULT_COMPOSE_FILE'"

    : :optfla: --ignore-domain-check \
        "Allow to bypass the domain check in compose file."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."

    local service_name compose_file

    ## Local overrides (may set DEFAULT_COMPOSE_FILE among others).
    [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf

    compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
    service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}

    if ! [ -e "$compose_file" ]; then
        err "Compose file not found in '$compose_file'."
        return 1
    fi

    compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
        "$opt_ignore_ping_check" "$opt_ignore_domain_check"

}
## 'vps install mailcow-backup BACKUP_SERVER' — install backup on a
## mailcow host by delegating to mailcow:install-backup.
cmdline.spec:install:cmd:mailcow-backup:run() {

    : :posarg: BACKUP_SERVER 'Target backup server'

    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."

    "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
}
cmdline.spec.gnu backup
## 'vps backup' — dispatch a backup run to the handler matching the
## detected vps type (cmdline.spec:backup:cmd:TYPE:run).
cmdline.spec::cmd:backup:run() {

    local vps_type

    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
        err "type '${vps_type}' has no backup process implemented yet."
        return 1
    fi

    "cmdline.spec:backup:cmd:${vps_type}:run"
}
## 'vps backup' on a mailcow host: re-run the commands that the
## installed cron jobs (mysql-backup, mirror-dir) would run, with
## their output prefixed and stderr highlighted.
cmdline.spec:backup:cmd:mailcow:run() {

    local cmd_line cron_line cmd

    for f in mysql-backup mirror-dir; do
        [ -e "/etc/cron.d/$f" ] || {
            err "Can't find '/etc/cron.d/$f'."
            echo " Have you forgotten to run 'vps install backup BACKUP_HOST' ?" >&2
            return 1
        }

        ## Pick the active (non-comment) crontab line holding the
        ## schedule; the command is everything after the 'root' field.
        if ! cron_line=$(cat "/etc/cron.d/$f" | grep -v "^#" | grep "\* \* \*"); then
            err "Can't find cron_line in '/etc/cron.d/$f'." \
                "Have you modified it ?"
            return 1
        fi

        ## Drop any trailing '| ...' logger pipe from the cron line.
        cron_line=${cron_line%|*}
        cmd_line=(${cron_line#*root})

        if [ "$f" == "mirror-dir" ]; then
            ## Strip '-q' so the interactive run is verbose.
            cmd=()
            for arg in "${cmd_line[@]}"; do
                [ "$arg" != "-q" ] && cmd+=("$arg")
            done
        else
            cmd=("${cmd_line[@]}")
        fi

        code="${cmd[*]}"
        echo "${WHITE}Launching:${NORMAL} ${code}"
        ## The 3>&1 1>&2 2>&3 pairs swap stdout/stderr so that stdout
        ## gets the gray '|' prefix and stderr the red '!' prefix,
        ## while set_errlvl keeps the eval'ed command's exit code
        ## through each sed pipe (PIPESTATUS[0]).
        {
            {
                (
                    ## Some commands are using colors that are already
                    ## set by this current program and will trickle
                    ## down unwantedly
                    ansi_color no
                    eval "${code}"
                ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
                set_errlvl "${PIPESTATUS[0]}"
            } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
            set_errlvl "${PIPESTATUS[0]}"
        } 3>&1 1>&2 2>&3
        if [ "$?" != "0" ]; then
            err "Failed."
            return 1
        fi
    done
    info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
set_errlvl() { return "${1:-1}"; }
## 'vps backup' on a compose host: run the configured backup command
## inside the project's cron container, with stdout prefixed gray '|'
## and stderr prefixed red '!' (the 3>&1 1>&2 2>&3 pairs swap the two
## streams around each sed stage; set_errlvl preserves the command's
## exit code through the pipes).
cmdline.spec:backup:cmd:compose:run() {

    local cron_line args

    project_name=$(compose:project_name) || return 1
    docker_cmd=$(compose:get_cron_docker_cmd) || return 1

    ## BUGFIX: the inner double quotes previously closed the outer
    ## string, leaving ${project_name}_cron_1 unquoted (subject to
    ## word-splitting/globbing) in the displayed message; escape them
    ## so the shown command matches the eval'ed one below.
    echo "${WHITE}Launching:${NORMAL} docker exec -i \"${project_name}_cron_1\" $docker_cmd"
    {
        {
            eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
            set_errlvl "${PIPESTATUS[0]}"
        } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
        set_errlvl "${PIPESTATUS[0]}"
    } 3>&1 1>&2 2>&3
    if [ "$?" != "0" ]; then
        err "Failed."
        return 1
    fi
    info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."

}
cmdline.spec.gnu recover-target
## 'vps recover-target BACKUP_DIR HOST_DIR' — restore a directory from
## the backup host onto this host via the 'backup-action' helper.
cmdline.spec::cmd:recover-target:run() {

    : :posarg: BACKUP_DIR 'Source directory on backup side'
    : :posarg: HOST_DIR 'Target directory on host side'

    : :optval: --backup-host,-B "The backup host"

    : :optfla: --dry-run,-n "Don't do anything, instead tell what it would do."

    ## if no backup host take the one by default
    backup_host="$opt_backup_host"
    if [ -z "$backup_host" ]; then
        backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
        read -r backup_host ident <<<"$backup_host_ident"
    fi
    ## NOTE(review): when --backup-host IS given, '$ident' is never
    ## assigned here and is passed empty (or inherited) to
    ## backup-action below — confirm this is intended.

    if [[ "$BACKUP_DIR" == /* ]]; then
        err "BACKUP_DIR must be a relative path from the root of your backup."
        return 1
    fi

    REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
        err "Can't find HOST_DIR '$HOST_DIR'."
        return 1
    }
    export DRY_RUN="${opt_dry_run}"
    backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
}
cmdline.spec.gnu odoo
## 'vps odoo' — group command only.
cmdline.spec::cmd:odoo:run() {
    :
}

cmdline.spec.gnu restart
## 'vps odoo restart' — restart the odoo container, retrying once on a
## known docker failure mode.
cmdline.spec:odoo:cmd:restart:run() {
    : :optval: --service,-s "The service (defaults to 'odoo')"

    local out odoo_service

    odoo_service="${opt_service:-odoo}"
    project_name=$(compose:project_name) || return 1

    if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
        ## NOTE(review): pattern has no trailing '*', so it only
        ## matches when the message ENDS with this text — confirm.
        if [[ "$out" == *"no matching entries in passwd file" ]]; then
            warn "Catched docker bug. Restarting once more."
            if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
                err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
                echo " output:" >&2
                echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
                exit 1
            fi
        else
            err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
            exit 1
        fi
    fi
    info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
}
cmdline.spec.gnu restore
## 'vps odoo restore ZIP_DUMP_LOCATION' — drop the target database and
## load the given odoo zip dump (local path or http(s) URL), then
## restart odoo.
cmdline.spec:odoo:cmd:restore:run() {
    : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore (can be a local file or an url)'

    : :optval: --service,-s "The service (defaults to 'odoo')"
    : :optval: --database,-d 'Target database (default if not specified)'

    local out
    odoo_service="${opt_service:-odoo}"

    if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] || [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
        ## Download to a temp dir and sanity-check the zip magic bytes.
        settmpdir ZIP_TMP_LOCATION
        tmp_location="$ZIP_TMP_LOCATION/dump.zip"
        curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
            err "Couldn't get '$ZIP_DUMP_LOCATION'."
            exit 1
        }
        ## Zip files start with the two bytes "PK".
        if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
            err "Download doesn't seem to be a zip file."
            dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
            exit 1
        fi
        info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
        echo " in '$tmp_location'." >&2
        ZIP_DUMP_LOCATION="$tmp_location"
    fi

    [ -e "$ZIP_DUMP_LOCATION" ] || {
        err "No file '$ZIP_DUMP_LOCATION' found." >&2
        exit 1
    }

    #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1

    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database is deliberately unquoted: empty means "no extra
    ## argument" for compose drop/load.
    compose --no-hooks drop "$odoo_service" $opt_database || {
        err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}."
        exit 1
    }
    compose --no-hooks load "$odoo_service" $opt_database < "$ZIP_DUMP_LOCATION" || {
        err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
        exit 1
    }

    info "Successfully restored ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."

    ## Restart odoo, ensure there is no bugs lingering on it.
    cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1

}
cmdline.spec.gnu dump
## 'vps odoo dump DUMP_ZIPFILE' — save the odoo database to a zip file
## via 'compose --no-hooks save'.
cmdline.spec:odoo:cmd:dump:run() {

    : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
    : :optval: --database,-d 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"

    odoo_service="${opt_service:-odoo}"

    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database deliberately unquoted: empty expands to nothing.
    compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
        err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
        exit 1
    }

    info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."

}
cmdline.spec.gnu drop
## 'vps odoo drop' — drop the odoo database via 'compose --no-hooks
## drop'.
cmdline.spec:odoo:cmd:drop:run() {

    : :optval: --database,-d 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"

    odoo_service="${opt_service:-odoo}"

    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database deliberately unquoted: empty expands to nothing.
    compose --no-hooks drop "$odoo_service" $opt_database || {
        err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
        exit 1
    }

    info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."

}
cmdline.spec.gnu set-cyclos-url
## 'vps odoo set-cyclos-url' — point odoo's res_company record at the
## cyclos service's public URL (SQL run through compose:psql inside a
## Wrap heredoc; the heredoc body is executed by Wrap, not here).
cmdline.spec:odoo:cmd:set-cyclos-url:run() {

    : :optval: --database,-d "Target database ('odoo' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"

    local URL

    dbname=${opt_database:-odoo}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
    echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
        compose:psql "$project_name" "$dbname" || {
            err "Failed to set cyclos url value in '$dbname' database."
            exit 1
        }
EOF
}
cmdline.spec.gnu fix-sso
## 'vps odoo fix-sso' — set the website record's user_id to the public
## user's id (SQL run through compose:psql inside a Wrap heredoc).
cmdline.spec:odoo:cmd:fix-sso:run() {
    : :optval: --database,-d "Target database ('odoo' if not specified)"

    local public_user_id project_name dbname

    dbname=${opt_database:-odoo}
    project_name=$(compose:project_name) || exit 1
    public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
    Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
    echo "UPDATE website SET user_id = $public_user_id;" |
        compose:psql "$project_name" "$dbname" || {
            err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
            exit 1
        }
EOF
}
cmdline.spec.gnu cyclos
## 'vps cyclos' — group command only.
cmdline.spec::cmd:cyclos:run() {
    :
}

## 'vps cyclos dump DUMP_GZFILE' — stop the cyclos container, dump its
## postgres database to a gz file, then restart the container.
cmdline.spec:cyclos:cmd:dump:run() {

    : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'

    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"

    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1

    ## Stop the service so the dump is consistent.
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1

    Wrap -d "Dump postgres database '${cyclos_database}'." -- \
         postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1

    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu restore
## 'vps cyclos restore GZ_DUMP_LOCATION' — restore a cyclos postgres
## gz dump (local file or http(s) URL): stop the container, restore,
## unlock the database, reset the root url, restart.
cmdline.spec:cyclos:cmd:restore:run() {

    : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore (can be a local file or an url)'

    : :optval: --service,-s "The service (defaults to 'cyclos')"
    : :optval: --database,-d 'Target database (default if not specified)'

    local out

    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1

    if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] || [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
        ## Download to a temp file and check the gzip magic (1f 8b)
        ## inside a Wrap heredoc (the body below runs under Wrap;
        ## "curll" typo lives in that runtime data and is left as-is).
        settmpdir GZ_TMP_LOCATION
        tmp_location="$GZ_TMP_LOCATION/dump.gz"

        Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1

    ## Note that curll version before 7.76.0 do not have
    curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
         > "$tmp_location" || {
        echo "Error fetching ressource. Is url correct ?" >&2
        exit 1
    }

    if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null | hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
        err "Download doesn't seem to be a gzip file."
        dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
        exit 1
    fi

EOF
        GZ_DUMP_LOCATION="$tmp_location"
    fi

    [ -e "$GZ_DUMP_LOCATION" ] || {
        err "No file '$GZ_DUMP_LOCATION' found." >&2
        exit 1
    }

    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1

    ## XXXvlab: making the assumption that the postgres username should
    ## be the same as the cyclos service selected (which is the default,
    ## but not always the case).
    Wrap -d "restore postgres database '${cyclos_database}'." -- \
         postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1

    ## ensure that the database is not locked
    Wrap -d "check and remove database lock if any" -- \
         cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1

    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1

    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1

}
cmdline.spec.gnu set-root-url
## 'vps cyclos set-root-url' — stop the cyclos container, update its
## configured root url to the service's public URL, restart.
cmdline.spec:cyclos:cmd:set-root-url:run() {

    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"

    local URL

    cyclos_database=${opt_database:-cyclos}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1

    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1

    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1

    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1

}
cmdline.spec.gnu unlock
## 'vps cyclos unlock' — stop the cyclos container, remove a possible
## database lock, restart the container.
cmdline.spec:cyclos:cmd:unlock:run() {

    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"

    local URL

    cyclos_database=${opt_database:-cyclos}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1

    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1

    Wrap -d "check and remove database lock if any" -- \
         cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1

    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1

}
cmdline.spec.gnu rocketchat
## 'vps rocketchat' — group command only.
cmdline.spec::cmd:rocketchat:run() {
    :
}

cmdline.spec.gnu drop-indexes
## 'vps rocketchat drop-indexes' — stop the rocketchat container, drop
## its mongo indexes, then restart it.  The container is restarted
## even when dropping failed; the error is reported afterwards.
cmdline.spec:rocketchat:cmd:drop-indexes:run() {

    : :optval: --database,-d "Target database ('rocketchat' if not specified)"
    : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"

    local URL

    rocketchat_database=${opt_database:-rocketchat}
    rocketchat_service="${opt_service:-rocketchat}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1

    Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1

    errlvl=0
    Wrap -d "drop indexes" -- \
         rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
        errlvl=1
        errmsg="Failed to drop indexes"
    }
    Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1

    if [ "$errlvl" != 0 ]; then
        err "$errmsg"
    fi
    exit "$errlvl"
}
cmdline.spec.gnu nextcloud
## 'vps nextcloud' — group command only.
cmdline.spec::cmd:nextcloud:run() {
    :
}

cmdline.spec.gnu upgrade
## 'vps nextcloud upgrade [TARGET_VERSION]' — stop the nextcloud
## containers, run the compose 'upgrade' action, pin the resulting
## code version in compose.yml, and (on success) run 'compose up'.
cmdline.spec:nextcloud:cmd:upgrade:run() {

    : :posarg: [TARGET_VERSION] "Target version to migrate to"
    : :optval: --service,-s "The nexcloud service name (defaults to 'nextcloud')"

    local URL

    nextcloud_service="${opt_service:-nextcloud}"
    project_name=$(compose:project_name) || exit 1
    containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1

    ## NOTE(review): container_stopped is filled but never read below —
    ## presumably meant for restarting on failure; confirm.
    container_stopped=()
    if [ -n "$containers" ]; then
        for container in $containers; do
            Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
                 docker stop "$container" || {
                err "Failed to stop container '$container'."
                exit 1
            }
            container_stopped+=("$container")
        done
    fi
    before_version=$(nextcloud:src:version) || exit 1

    ## -q to remove the display of ``compose`` related information
    ## like relation resolution.
    ## --no-hint to remove the final hint about modifying your
    ## ``compose.yml``.
    compose -q upgrade "$nextcloud_service" --no-hint "$TARGET_VERSION"
    errlvl="$?"

    ## Pin the new code version in compose.yml even on partial failure.
    after_version=$(nextcloud:src:version)
    if [ "$after_version" != "$before_version" ]; then
        desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
        desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
        Wrap -d "$desc" -- \
             compose:file:value-change \
             "${nextcloud_service}.docker-compose.image" \
             "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
    fi

    if [ "$errlvl" == 0 ]; then
        echo "${WHITE}Launching final compose${NORMAL}"
        compose up || exit 1
    fi

    exit "$errlvl"
}
cmdline.spec.gnu check-fix
## 'vps check-fix [SERVICES...]' — run every (or the selected)
## container:health:check-fix:* check on the containers of the current
## compose project, optionally restricted to given services.
cmdline.spec::cmd:check-fix:run() {

    : :posarg: [SERVICES...] "Optional service to check"
    : :optval: --check,-c "Specify a check or a list of checks separated by commas"
    : :optfla: --silent,-s "Don't ouput anything if everything goes well"

    local project_name service_name containers container check

    ## Discover available checks from the exported function namespace.
    all_checks=$(declare -F | egrep '^declare -fx? container:health:check-fix:[^ ]+$' | cut -f 4 -d ":")
    checks=(${opt_check//,/ })
    for check in "${checks[@]}"; do
        fn.exists container:health:check-fix:$check || {
            err "check '$check' not found."
            return 1
        }
    done
    if [ "${#checks[*]}" == 0 ]; then
        checks=($all_checks)
    fi

    ## XXXvlab: could make it parallel
    project_name=$(compose:project_name) || exit 1
    containers=($(compose:project:containers "${project_name}")) || exit 1

    found=
    for container in "${containers[@]}"; do
        service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
        ## Restrict to the services given on the command line, if any.
        if [ "${#SERVICES[@]}" -gt 0 ]; then
            [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
        fi
        found=1
        one_bad=
        for check in "${checks[@]}"; do
            if ! container:health:check-fix:"$check" "$container"; then
                one_bad=1
            fi
        done
        if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
            Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
            Feedback
        fi
    done
    if [ -z "$found" ]; then
        if [ -z "$opt_silent" ]; then
            if [ "${#SERVICES[@]}" -gt 0 ]; then
                warn "No container for given services found in current project '$project_name'."
            else
                warn "No container found for current project '$project_name'."
            fi
        fi
        return 1
    fi
}
awk:require() { local require_at_least="$1" version already_installed while true; do if ! version=$(awk --version 2>/dev/null); then version="" else version=${version%%,*} version=${version##* } fi if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then if [ -z "$already_installed" ]; then if [ -z "$version" ]; then info "No 'gawk' available, probably using a clone. Installing 'gawk'..." else info "Found gawk version '$version'. Updating 'gawk'..." fi apt-get install gawk -y </dev/null || { err "Failed to install 'gawk'." return 1 } already_installed=true else if [ -z "$version" ]; then err "No 'gawk' available even after having installed one" else err "'gawk' version '$version' is lower than required" \ "'$require_at_least' even after updating 'gawk'." fi return 1 fi continue fi return 0 done }
cmdline.spec.gnu stats
## 'vps stats' — collect and display resource statistics (each
## resource is implemented by a 'stats:RESOURCE' function, which also
## feeds the RRD files as a side effect).
cmdline.spec::cmd:stats:run() {

    : :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
    : :optfla: --silent,-s "Shorthand for '--format silent'"
    : :optval: --resource,-r 'resource(s) separated with a comma'

    local project_name service_name containers container check

    ## Normalize --format / --silent into one canonical opt_format.
    if [[ -n "${opt_silent}" ]]; then
        if [[ -n "${opt_format}" ]]; then
            err "'--silent' conflict with option '--format'."
            return 1
        fi
        opt_format=s
    fi
    opt_format="${opt_format:-pretty}"
    case "${opt_format}" in
        raw|r)
            opt_format="raw"
            :
            ;;
        silent|s)
            opt_format="silent"
            ;;
        pretty|p)
            opt_format="pretty"
            ## pretty output relies on gawk's strftime (>= 4.1.4).
            awk:require 4.1.4 || return 1
            ;;
        *)
            err "Invalid value '$opt_format' for option --format"
            echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
            return 1
    esac

    local resources=(c.{memory,network} load_avg)
    if [ -n "${opt_resource}" ]; then
        resources=(${opt_resource//,/ })
    fi

    ## Validate that every requested resource has a stats:* handler.
    local not_found=()
    for resource in "${resources[@]}"; do
        if ! fn.exists "stats:$resource"; then
            not_found+=("$resource")
        fi
    done

    if [[ "${#not_found[@]}" -gt 0 ]]; then
        not_found_msg=$(printf "%s, " "${not_found[@]}")
        not_found_msg=${not_found_msg%, }
        err "Unsupported resource(s) provided: ${not_found_msg}"
        echo " resource must be one-of:" >&2
        declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
        return 1
    fi

    :state-dir:

    for resource in "${resources[@]}"; do
        if [ "$opt_format" == "pretty" ]; then
            echo "${WHITE}$resource${NORMAL}:"
            stats:"$resource" "$opt_format" 2>&1 | prefix " "
        else
            stats:"$resource" "$opt_format" 2>&1 | prefix "$resource "
        fi
        set_errlvl "${PIPESTATUS[0]}" || return 1
    done
}
stats:c.memory() { local format="$1" local out container_to_check=($(docker:running_containers)) || exit 1 out=$(docker:containers:stats "${container_to_check[@]}") printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || { return 1 } case "${format:-p}" in raw|r) printf "%s\n" "$out" | cut -f 1-5 -d " " ;; pretty|p) awk:require 4.1.4 || return 1 { echo "container" "__total____" "buffered____" "resident____" printf "%s\n" "$out" | awk ' { offset = strftime("%z", $2); print $1, substr($0, index($0,$3)); }' | cut -f 1-4 -d " " | numfmt --field 2-4 --to=iec-i --format=%8.1fB | sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' | sort } | col:normalize:size -+++ | sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' | header:make ;; esac }
stats:c.network() { local format="$1" local out container_to_check=($(docker:running_containers)) || exit 1 out=$(docker:containers:stats "${container_to_check[@]}") cols=( {rx,tx}_{bytes,packets,errors,dropped} ) idx=5 ## starting column idx for next fields defs=() for col in "${cols[@]}"; do defs+=("$((idx++)):${col}:COUNTER:U:U") done OLDIFS="$IFS" IFS="," defs="${defs[*]}" IFS="$OLDIFS" printf "%s\n" "$out" | rrd:update "containers" \ "network|${defs}" || { return 1 } case "${format:-p}" in raw|r) printf "%s\n" "$out" | cut -f 1,2,7- -d " " ;; pretty|p) awk:require 4.1.4 || return 1 { echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX" echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped" printf "%s\n" "$out" | awk ' { offset = strftime("%z", $2); print $1, substr($0, index($0,$7)); }' | numfmt --field 2,6 --to=iec-i --format=%8.1fB | numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f | sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' | sort } | col:normalize:size -++++++++ | sed -r ' s/(\.[0-9]):([A-Z]?iB)/\1 \2/g; s/(\.[0-9]):([KMGTPE])/\1 \2/g; s/ ([0-9]+)\.0:B/\1 /g; s/ ([0-9]+)\.0:/\1 /g; ' | header:make 2 ;; esac }
header:make() { local nb_line="${1:-1}" local line while ((nb_line-- > 0)); do read-0a line echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}" done cat }
stats:load_avg() { local format="$1" local out out=$(host:sys:load_avg) printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || { return 1 } case "${format:-p}" in raw|r) printf "%s\n" "$out" | cut -f 2-5 -d " " ;; pretty|p) { echo "___1m" "___5m" "__15m" printf "%s\n" "$out" | cut -f 3-5 -d " " } | col:normalize:size +++ | header:make ;; esac }
host:sys:load_avg() { local uptime uptime="$(uptime)" uptime=${uptime##*: } uptime=${uptime//,/} printf "%s " "" "$(date +%s)" "$uptime" }
cmdline.spec.gnu mongo
## 'vps mongo' — group command only.
cmdline.spec::cmd:mongo:run() {
    :
}
cmdline.spec.gnu upgrade
## 'vps mongo upgrade [TARGET_VERSION]' — run the compose 'upgrade'
## action on the mongo service, parsing its output to learn the
## before/after versions, then pin the new version in compose.yml and
## run a final 'compose up'.  The cron container is stopped during the
## upgrade and restarted by the final compose (or explicitly when the
## service was already up-to-date).
cmdline.spec:mongo:cmd:upgrade:run() {

    : :posarg: [TARGET_VERSION] "Target version to migrate to"
    : :optval: --service,-s "The mongo service name (defaults to 'mongo')"
    : :optfla: --debug,-d "Display debugging information"

    local URL

    mongo_service="${opt_service:-mongo}"

    available_actions=$(compose --get-available-actions) || exit 1
    available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
    if [[ " ${available_actionable_services[*]} " != *" $mongo_service "* ]]; then
        err "Service '$mongo_service' was not found in current 'compose.yml'."
        exit 1
    fi

    opts_compose=()
    if [ -n "$opt_debug" ]; then
        opts_compose+=("--debug")
    else
        opts_compose+=("-q")
    fi

    project_name=$(compose:project_name) || exit 1
    containers="$(compose:service:containers "${project_name}" "${mongo_service}")" || exit 1

    ## XXXvlab: quick hack, to make more beautiful later
    cron_container=$(compose:service:containers "${project_name}" "cron")
    containers="$containers $cron_container"
    docker stop "$cron_container" >/dev/null 2>&1 || true

    ## Parse the (colored) upgrade output line by line; read-0a-err
    ## fills 'errlvl' with the final errorlevel emitted by p-0a-err.
    before_version=
    uptodate=
    upgraded=
    msgerr=()
    while read-0a-err errlvl line; do
        echo "$line"
        rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
        case "$rline" in
            "II Current mongo version: "*)
                before_version="${rline#II Current mongo version: }"
                ;;
            "II ${mongo_service} is already up-to-date.")
                if [ -z "$before_version" ]; then
                    msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
                    continue
                fi
                after_version="$before_version"
                uptodate=1
                ;;
            "II Successfully upgraded from ${before_version} to "*)
                after_version="${rline#II Successfully upgraded from ${before_version} to }"
                upgraded=1
                ;;
            *)
                :
                ;;
        esac
    done < <(
        ## -q to remove the display of ``compose`` related information
        ## like relation resolution.
        ## -c on the upgrade action to force color
        ansi_color=yes p-0a-err compose -c "${opts_compose[@]}" upgrade "$mongo_service" --no-hint -c "$TARGET_VERSION"
    )
    if [ "$errlvl" != 0 ]; then
        exit "$errlvl"
    fi
    if [ -n "$uptodate" ]; then
        ## BUGFIX: 'containers' is a whitespace-separated string, not
        ## an array; the previous "${containers[@]}" expanded to ONE
        ## word holding every container id, so 'docker start' received
        ## them glued together.  Word-split the scalar on purpose.
        for container in $containers; do
            [ -n "$container" ] || continue
            Wrap -d "start ${DARKYELLOW}${mongo_service}${NORMAL}'s container" -- \
                 docker start "$container" || {
                err "Failed to start container '$container'."
                exit 1
            }
        done
        exit 0
    fi
    if [ -z "$upgraded" ]; then
        err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
        exit 1
    fi

    desc="update \`compose.yml\` to set ${DARKYELLOW}$mongo_service${NORMAL}'s "
    desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
    Wrap -d "$desc" -- \
         compose:file:value-change \
         "${mongo_service}.docker-compose.image" \
         "docker.0k.io/mongo:${after_version}-myc" || exit 1

    echo "${WHITE}Launching final compose${NORMAL}"
    compose up || exit 1
}
cmdline.spec.gnu bench
## 'vps bench' — quick sysbench CPU benchmark: print
## "<multi-thread eps> / <single-thread eps> / <nb threads>".
cmdline.spec::cmd:bench:run() {

    depends sysbench

    ## Number of logical CPUs as reported by lscpu.
    nbthread=$(lscpu | egrep "^CPU\(s\):" | cut -f 2 -d : | xargs echo)
    ## "events per second" figures, single- then multi-threaded.
    single=$(sysbench cpu --cpu-max-prime=20000 run --threads=1 | grep "events per" | cut -f 2 -d : | xargs echo)
    threaded=$(sysbench cpu --cpu-max-prime=20000 run --threads="$nbthread" | grep "events per" | cut -f 2 -d : | xargs echo)
    echo "$threaded / $single / $nbthread"

}
cmdline::parse "$@"
|