#!/bin/bash
. /etc/shlib
include common
include parse
include cmdline
include config
include cache
include fn
include docker
[[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
version=0.1
desc='Install backup'
help=""
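## List the compose project names of the currently running containers
## (docker label 'com.docker.compose.project'), one per line, deduplicated.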
docker:running-container-projects() {
:cache: scope=session
docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
}
decorator._mangle_fn docker:running-container-projects
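## Generate a passphrase-less RSA key pair in a temporary directory and
## print the private key on stdout (key comment: "$service_name@$host").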
ssh:mk-private-key() {
local host="$1" service_name="$2"
(
settmpdir VPS_TMPDIR
ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
cat "$VPS_TMPDIR/rsync_rsa"
)
}
mailcow:has-images-running() {
local images
images=$(docker ps --format '{{.Image}}' | sort | uniq)
[[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
}
mailcow:has-container-project-mentionning-mailcow() {
local projects
projects=$(docker:running-container-projects) || return 1
[[ $'\n'"$projects"$'\n' == *mailcow* ]]
}
mailcow:has-running-containers() {
mailcow:has-images-running ||
mailcow:has-container-project-mentionning-mailcow
}
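## Probe the usual mailcow-dockerized locations and print the first
## directory containing a readable 'mailcow.conf'.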
mailcow:get-root() {
:cache: scope=session
local dir
for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
[ -d "$dir" ] || continue
[ -r "$dir/mailcow.conf" ] || continue
echo "$dir"
return 0
done
return 1
}
decorator._mangle_fn mailcow:get-root
compose:get-compose-yml() {
:cache: scope=session
local path
[ -e "/etc/compose/local.conf" ] && . "/etc/compose/local.conf"
path=${DEFAULT_COMPOSE_FILE:-/etc/compose/compose.yml}
[ -e "$path" ] || return 1
echo "$path"
}
decorator._mangle_fn compose:get-compose-yml
compose:has-container-project-myc() {
local projects
projects=$(docker:running-container-projects) || return 1
[[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
}
type:is-mailcow() {
mailcow:get-root >/dev/null ||
mailcow:has-running-containers
}
type:is-compose() {
compose:get-compose-yml >/dev/null &&
compose:has-container-project-myc
}
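## Detect the installation type by running every 'type:is-*' predicate
## defined above and printing the suffix of the first one that succeeds
## (e.g. 'mailcow' or 'compose').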
vps:get-type() {
:cache: scope=session
local fn
for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
"$fn" && {
echo "${fn#type:is-}"
return 0
}
done
return 1
}
decorator._mangle_fn vps:get-type
mirror-dir:sources() {
:cache: scope=session
if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
return 1
fi
}
decorator._mangle_fn mirror-dir:sources
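## Add the given path to the 'sources:' list of /etc/mirror-dir/config.yml
## unless it is already listed there.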
mirror-dir:check-add() {
local elt="$1" sources
sources=$(mirror-dir:sources) || return 1
if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
info "Volume $elt already in sources"
else
Elt "Adding directory $elt"
sed -i "/sources:/a\ - \"${elt}\"" \
/etc/mirror-dir/config.yml
Feedback || return 1
fi
}
mirror-dir:check-add-vol() {
local elt="$1"
mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
}
## The first colon is to prevent auto-export of function from shlib
: ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
export BASH_BUG_5=1 && unset -f bash-bug-5
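## Run a code snippet through Wrap under the given label; when the bash
## bug detected above is present, the code is first written to a temporary
## executable file instead of being fed through a process substitution.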
wrap() {
local label="$1" code="$2"
shift 2
export VERBOSE=1
interpreter=/bin/bash
if [ -n "$BASH_BUG_5" ]; then
(
settmpdir tmpdir
fname=${label##*/}
e "$code" > "$tmpdir/$fname" &&
chmod +x "$tmpdir/$fname" &&
Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
)
else
Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
fi
}
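## Check that the given host resolves to our own public IP (as reported by
## myip.kal.fr); on mismatch, warn if '--ignore-ping-check' was given,
## fail otherwise.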
ping_check() {
#global ignore_ping_check
local host="$1"
ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
head -n 1 | cut -f 1 -d " ") || return 1
my_ip=$(curl -s myip.kal.fr)
if [ "$ip" != "$my_ip" ]; then
if [ -n "$ignore_ping_check" ]; then
warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
else
err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
return 1
fi
fi
}
mailcow:install-backup() {
local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
## find installation
mailcow_root=$(mailcow:get-root) || {
err "Couldn't find a valid mailcow root directory."
return 1
}
## check ok
DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
return 1
}
ping_check "$DOMAIN" || return 1
MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
return 1
}
MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
if [ -z "$container_id" ]; then
err "Couldn't find docker container named '$MYSQL_CONTAINER'."
return 1
fi
export KEY_BACKUP_ID="mailcow"
export MYSQL_ROOT_PASSWORD
export MYSQL_CONTAINER
export BACKUP_SERVER
export DOMAIN
wrap "Install rsync-backup on host" "
cd /srv/charm-store/rsync-backup
bash ./hooks/install.d/60-install.sh
" || return 1
wrap "Mysql dump install" "
cd /srv/charm-store/mariadb
bash ./hooks/install.d/60-backup.sh
" || return 1
## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
for elt in vmail{,-attachments-vol} crypt redis rspamd postfix; do
mirror-dir:check-add-vol "$elt" || return 1
done
mirror-dir:check-add "$mailcow_root" || return 1
mirror-dir:check-add "/var/backups/mysql" || return 1
mirror-dir:check-add "/etc" || return 1
dest="$BACKUP_SERVER"
dest="${dest%/*}"
ssh_options=()
if [[ "$dest" == *":"* ]]; then
port="${dest##*:}"
dest="${dest%%:*}"
ssh_options=(-p "$port")
else
port=""
dest="${dest%%:*}"
fi
info "You can run the following command from a host having admin access to $dest:"
echo " (Or send it to a backup admin of $dest)" >&2
echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
}
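## Return 0 if the given host is mentioned in the compose file, either as
## a domain-looking service name or as the 'domain'/'server-aliases' value
## of a 'web-proxy' or 'publish-dir' relation.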
compose:has_domain() {
local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
while read-0 name conf ; do
name=$(e "$name" | shyaml get-value)
if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
[ "$host" == "$name" ] && return 0
fi
rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
for relation in web-proxy publish-dir; do
relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
while read-0 label conf_relation; do
domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
[ "$host" == "$domain" ] && return 0
}
server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
[[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
}
done < <(e "$relation_value" | shyaml -y key-values-0)
done
done < <(shyaml -y key-values-0 < "$compose_file")
return 1
}
compose:install-backup() {
local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
## XXXvlab: far from perfect as it mimics and depends on the internal
## logic of the current default way to get a domain in compose-core
host=$(hostname)
if ! compose:has_domain "$compose_file" "$host"; then
if [ -n "$ignore_domain_check" ]; then
warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
else
err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
return 1
fi
fi
ping_check "$host" || return 1
if [ -e "/root/.ssh/rsync_rsa" ]; then
warn "deleting private key in /root/.ssh/rsync_rsa, as it is not used anymore."
rm -fv /root/.ssh/rsync_rsa
fi
if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
warn "deleting public key in /root/.ssh/rsync_rsa.pub, as it is not used anymore."
rm -fv /root/.ssh/rsync_rsa.pub
fi
if service_cfg=$(cat "$compose_file" |
shyaml get-value -y "$service_name" 2>/dev/null); then
info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
"is already present in '$compose_file'."
cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
"entry in '$compose_file'."
return 1
}
private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
target=$(e "$cfg" | shyaml get-value target) || return 1
if [ "$target" != "$BACKUP_SERVER" ]; then
err "Existing backup target '$target' is different" \
"from specified '$BACKUP_SERVER'"
return 1
fi
else
private_key=$(ssh:mk-private-key "$host" "$service_name")
cat <<EOF >> "$compose_file"
$service_name:
  options:
    ident: $host
    target: $BACKUP_SERVER
    private-key: |
$(e "$private_key" | sed -r 's/^/      /g')
EOF
fi
dest="$BACKUP_SERVER"
dest="${dest%/*}"
ssh_options=()
if [[ "$dest" == *":"* ]]; then
port="${dest##*:}"
dest="${dest%%:*}"
ssh_options=(-p "$port")
else
port=""
dest="${dest%%:*}"
fi
info "You can run the following command from a host having admin access to $dest:"
echo " (Or send it to a backup admin of $dest)" >&2
public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n'))
echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
}
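## Dispatch an action (e.g. 'recover-target' or
## 'get_default_backup_host_ident') to its '<vps-type>:<action>'
## implementation for the detected installation type.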
backup-action() {
local action="$1"
shift
vps_type=$(vps:get-type) || {
err "Failed to get type of installation."
return 1
}
if ! fn.exists "${vps_type}:${action}"; then
err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
return 1
fi
"${vps_type}:${action}" "$@"
}
compose:get_default_backup_host_ident() {
local service_name="$1" ## Optional
local compose_file service_cfg cfg target
compose_file=$(compose:get-compose-yml)
service_name="${service_name:-rsync-backup}"
if ! service_cfg=$(cat "$compose_file" |
shyaml get-value -y "$service_name" 2>/dev/null); then
err "No service named '$service_name' found in '$compose_file'."
return 1
fi
cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
"entry in '$compose_file'."
return 1
}
if ! target=$(e "$cfg" | shyaml get-value target); then
err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
"entry in '$compose_file'."
return 1
fi
if ! ident=$(e "$cfg" | shyaml get-value ident); then
err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
"entry in '$compose_file'."
return 1
fi
echo "$target $ident"
}
mailcow:get_default_backup_host_ident() {
local content cron_line ident found dest cmd_line
if ! [ -e "/etc/cron.d/mirror-dir" ]; then
err "No '/etc/cron.d/mirror-dir' found."
return 1
fi
content=$(cat /etc/cron.d/mirror-dir) || {
err "Can't read '/etc/cron.d/mirror-dir'."
return 1
}
if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
return 1
fi
cron_line=${cron_line%|*}
cmd_line=(${cron_line#*root})
found=
dest=
for arg in "${cmd_line[@]}"; do
[ -n "$found" ] && {
dest="$arg"
break
}
[ "$arg" == "-d" ] && {
found=1
}
done
if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
return 1
fi
if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
## unquoting, the eval should be safe because of previous check
dest=$(eval e "$dest")
fi
if [ -z "$dest" ]; then
err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
return 1
fi
## looking for ident
found=
ident=
for arg in "${cmd_line[@]}"; do
[ -n "$found" ] && {
ident="$arg"
break
}
[ "$arg" == "-h" ] && {
found=1
}
done
if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
return 1
fi
if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
## unquoting, the eval should be safe because of previous check
ident=$(eval e "$ident")
fi
if [ -z "$ident" ]; then
err "Can't find identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
return 1
fi
echo "$dest $ident"
}
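## Helpers listing the running containers of a compose service (label
## 'compose.master-service') or charm (label 'compose.charm'): the
## '*_one' variants fail unless exactly one container is running, while
## the '*_first' variants only warn when more than one is found.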
compose:service:containers() {
local project="$1" service="$2"
docker ps \
--filter label="com.docker.compose.project=$project" \
--filter label="compose.master-service=$service" \
--format="{{.ID}}"
}
export -f compose:service:containers
compose:service:container_one() {
local project="$1" service="$2" container_id
{
read-0a container_id || {
err "service ${DARKYELLOW}$service${NORMAL} has no running container."
return 1
}
if read-0a _; then
err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
return 1
fi
} < <(compose:service:containers "$project" "$service")
echo "$container_id"
}
export -f compose:service:container_one
compose:service:container_first() {
local project="$1" service="$2" container_id
{
read-0a container_id || {
err "service ${DARKYELLOW}$service${NORMAL} has no running container."
return 1
}
if read-0a _; then
warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
fi
} < <(compose:service:containers "$project" "$service")
echo "$container_id"
}
export -f compose:service:container_first
compose:charm:containers() {
local project="$1" charm="$2"
docker ps \
--filter label="com.docker.compose.project=$project" \
--filter label="compose.charm=$charm" \
--format="{{.ID}}"
}
export -f compose:charm:containers
compose:charm:container_one() {
local project="$1" charm="$2" container_id
{
read-0a container_id || {
err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
return 1
}
if read-0a _; then
err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
return 1
fi
} < <(compose:charm:containers "$project" "$charm")
echo "$container_id"
}
export -f compose:charm:container_one
compose:charm:container_first() {
local project="$1" charm="$2" container_id
{
read-0a container_id || {
warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
}
if read-0a _; then
warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
fi
} < <(compose:charm:containers "$project" "$charm")
echo "$container_id"
}
export -f compose:charm:container_first
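## Print the public url of a service: read it from the '<service>-frontend'
## web-proxy relation data when present, otherwise fall back to the
## container's network IP and its first exposed tcp port.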
compose:get_url() {
local project_name="$1" service="$2" data_file network ip
data_file="/var/lib/compose/relations/${project_name}/${service}-frontend/web-proxy/data"
if [ -e "$data_file" ]; then
(
set -o pipefail
cat "$data_file" | shyaml get-value url
)
else
## Assume there is no frontend relation here; the url is the direct IP
container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
IFS=":" read -r network ip <<<"$network_ip"
tcp_port=
for port in $(docker:exposed_ports "$container_id"); do
IFS="/" read port type <<<"$port"
[ "$type" == "tcp" ] || continue
tcp_port="$port"
break
done
echo -n "http://$ip"
[ -n "$tcp_port" ] && echo ":$tcp_port"
fi || {
err "Failed to query ${service}'s frontend relation to get the url."
return 1
}
}
export -f compose:get_url
compose:container:service() {
local container="$1" service
if ! service=$(docker:container:label "$container" "compose.service"); then
err "Failed to get service name from container ${container}."
return 1
fi
if [ -z "$service" ]; then
err "No service found for container ${container}."
return 1
fi
echo "$service"
}
export -f compose:container:service
compose:psql() {
local project_name="$1" dbname="$2"
container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
docker exec -i "${container_id}" psql -U postgres "$dbname"
}
export -f compose:psql
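## Run 'pgm' in a throwaway container based on the postgres charm's image,
## attached to the postgres container's network and authenticated through
## the service's pgpass file. Extra 'docker run' options can be provided
## via the 'pgm_docker_run_opts' array (as postgres:dump/restore do below).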
compose:pgm() {
local project_name="$1" container_network_ip container_ip container_network
shift
container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
service_name=$(compose:container:service "$container_id") || return 1
image_id=$(docker:container:image "$container_id") || return 1
container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
IFS=":" read -r container_network container_ip <<<"$container_network_ip"
pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
local final_pgm_docker_run_opts+=(
-u 0 -e prefix_pg_local_command=" "
--network "${container_network}"
-e PGHOST="$container_ip"
-e PGUSER=postgres
-v "$pgpass:/root/.pgpass"
"${pgm_docker_run_opts[@]}"
)
cmd=(docker run --rm \
"${final_pgm_docker_run_opts[@]}" \
"${image_id}" pgm "$@"
)
echo "${cmd[@]}"
"${cmd[@]}"
}
export -f compose:pgm
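## Dump/restore a postgres database through 'pgm cp', exchanging the
## gzipped dump via a bind-mounted temporary path.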
postgres:dump() {
local project_name="$1" src="$2" dst="$3"
(
settmpdir PGM_TMP_LOCATION
pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
) || return 1
}
export -f postgres:dump
postgres:restore() {
local project_name="$1" src="$2" dst="$3"
full_src_path=$(readlink -e "$src") || exit 1
(
pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
) || return 1
}
export -f postgres:restore
cyclos:set_root_url() {
local project_name="$1" dbname="$2" url="$3"
echo "UPDATE configurations SET root_url = '$url';" |
compose:psql "$project_name" "$dbname" || {
err "Failed to set cyclos url value in '$dbname' database."
return 1
}
}
export -f cyclos:set_root_url
cyclos:unlock() {
local project_name="$1" dbname="$2" url="$3"
echo "delete from database_lock;" |
compose:psql "${project_name}" "${dbname}"
}
export -f cyclos:unlock
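## Print (and cache in the exported PROJECT_NAME variable) the current
## compose project name, failing with a hint when no 'compose.yml' can
## be located.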
compose:project_name() {
if [ -z "$PROJECT_NAME" ]; then
PROJECT_NAME=$(compose --get-project-name) || {
err "Couldn't get project name."
return 1
}
if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
err "Couldn't get project name, probably because 'compose.yml' wasn't found."
echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
return 1
fi
export PROJECT_NAME
fi
echo "$PROJECT_NAME"
}
export -f compose:project_name
compose:get_cron_docker_cmd() {
local cron_line cmd_line docker_cmd
project_name=$(compose:project_name) || return 1
if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
err "Can't find the cron line in the cron container."
echo " Have you forgotten to run 'compose up'?" >&2
return 1
fi
cron_line=${cron_line%|*}
cron_line=${cron_line%"2>&1"*}
cmd_line="${cron_line#*root}"
eval "args=($cmd_line)"
## should be last argument
docker_cmd=$(echo ${args[@]: -1})
if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
echo "The docker command found should start with 'docker run --rm -e'." >&2
echo "Here's the command found:" >&2
echo " $docker_cmd" >&2
return 1
fi
e "$docker_cmd"
}
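## Rsync a subtree of the backups kept for '$ident' on '$backup_host' back
## into '$dst' on this host, using the rsync-backup service's image and
## ssh identity. Honors DRY_RUN by adding '-n' to the rsync options.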
compose:recover-target() {
local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
project_name=$(compose:project_name) || return 1
docker_image="${project_name}_${service_name}"
if ! docker_has_image "$docker_image"; then
compose build "${service_name}" || {
err "Couldn't find or build an image for service '$service_name'."
return 1
}
fi
dst="${dst%/}" ## remove final slash
ssh_options=(-o StrictHostKeyChecking=no)
if [[ "$backup_host" == *":"* ]]; then
port="${backup_host##*:}"
backup_host="${backup_host%%:*}"
ssh_options+=(-p "$port")
else
port=""
backup_host="${backup_host%%:*}"
fi
rsync_opts=(
-e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
-azvArH --delete --delete-excluded
--partial --partial-dir .rsync-partial
--numeric-ids
)
if [ "$DRY_RUN" ]; then
rsync_opts+=("-n")
fi
cmd=(
docker run --rm --entrypoint rsync \
-v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
-v "${dst%/*}":/mnt/dest \
"$docker_image" \
"${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
)
echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
"${cmd[@]}"
}
mailcow:recover-target() {
local backup_host="$1" ident="$2" src="$3" dst="$4"
dst="${dst%/}" ## remove final slash
ssh_options=(-o StrictHostKeyChecking=no)
if [[ "$backup_host" == *":"* ]]; then
port="${backup_host##*:}"
backup_host="${backup_host%%:*}"
ssh_options+=(-p "$port")
else
port=""
backup_host="${backup_host%%:*}"
fi
rsync_opts=(
-e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
-azvArH --delete --delete-excluded
--partial --partial-dir .rsync-partial
--numeric-ids
)
if [ "$DRY_RUN" ]; then
rsync_opts+=("-n")
fi
cmd=(
rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
)
echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
"${cmd[@]}"
}
[ "$SOURCED" ] && return 0
##
## Command line processing
##
cmdline.spec.gnu
cmdline.spec.reporting
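## Typical invocations (the backup host, paths and ports below are
## placeholder examples only):
##
##   vps install backup backup.example.com:10023
##   vps backup
##   vps recover-target etc/ssh /tmp/recovered-ssh --dry-run
##   vps odoo restore /tmp/dump.zip
##   vps cyclos unlock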
cmdline.spec.gnu install
cmdline.spec::cmd:install:run() {
:
}
cmdline.spec.gnu get-type
cmdline.spec::cmd:get-type:run() {
vps:get-type
}
cmdline.spec:install:cmd:backup:run() {
: :posarg: BACKUP_SERVER 'Target backup server'
: :optfla: --ignore-domain-check \
"Allows bypassing the domain check in
the compose file (only used in compose
installations)."
: :optfla: --ignore-ping-check "Allows bypassing the ping check of the host."
local vps_type
vps_type=$(vps:get-type) || {
err "Failed to get type of installation."
return 1
}
if ! fn.exists "${vps_type}:install-backup"; then
err "type '${vps_type}' has no backup installation implemented yet."
return 1
fi
opts=()
[ "$opt_ignore_ping_check" ] &&
opts+=("--ignore-ping-check")
if [ "$vps_type" == "compose" ]; then
[ "$opt_ignore_domain_check" ] &&
opts+=("--ignore-domain-check")
fi
"cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
}
DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
cmdline.spec.gnu compose-backup
cmdline.spec:install:cmd:compose-backup:run() {
: :posarg: BACKUP_SERVER 'Target backup server'
: :optval: --service-name,-s "YAML service name in compose
file to check for existence of key.
Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
: :optval: --compose-file,-f "Compose file location. Defaults to
the value of '\$DEFAULT_COMPOSE_FILE'"
: :optfla: --ignore-domain-check \
"Allows bypassing the domain check in
the compose file."
: :optfla: --ignore-ping-check "Allows bypassing the ping check of the host."
local service_name compose_file
[ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
if ! [ -e "$compose_file" ]; then
err "Compose file not found in '$compose_file'."
return 1
fi
compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
"$opt_ignore_ping_check" "$opt_ignore_domain_check"
}
cmdline.spec:install:cmd:mailcow-backup:run() {
: :posarg: BACKUP_SERVER 'Target backup server'
: :optfla: --ignore-ping-check "Allows bypassing the ping check of the host."
"mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
}
cmdline.spec.gnu backup
cmdline.spec::cmd:backup:run() {
local vps_type
vps_type=$(vps:get-type) || {
err "Failed to get type of installation."
return 1
}
if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
err "type '${vps_type}' has no backup process implemented yet."
return 1
fi
"cmdline.spec:backup:cmd:${vps_type}:run"
}
cmdline.spec:backup:cmd:mailcow:run() {
local cmd_line cron_line cmd
for f in mysql-backup mirror-dir; do
[ -e "/etc/cron.d/$f" ] || {
err "Can't find '/etc/cron.d/$f'."
echo " Have you forgotten to run 'vps install backup BACKUP_HOST'?" >&2
return 1
}
if ! cron_line=$(cat "/etc/cron.d/$f" |
grep -v "^#" | grep "\* \* \*"); then
err "Can't find the cron line in '/etc/cron.d/$f'." \
"Have you modified it?"
return 1
fi
cron_line=${cron_line%|*}
cmd_line=(${cron_line#*root})
if [ "$f" == "mirror-dir" ]; then
cmd=()
for arg in "${cmd_line[@]}"; do
[ "$arg" != "-q" ] && cmd+=("$arg")
done
else
cmd=("${cmd_line[@]}")
fi
code="${cmd[*]}"
echo "${WHITE}Launching:${NORMAL} ${code}"
{
{
(
## Some commands use colors that are already set by this
## program and would otherwise trickle down unwantedly
ansi_color no
eval "${code}"
) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3
if [ "$?" != "0" ]; then
err "Failed."
return 1
fi
done
info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
set_errlvl() { return "${1:-1}"; }
cmdline.spec:backup:cmd:compose:run() {
local cron_line args
project_name=$(compose:project_name) || return 1
docker_cmd=$(compose:get_cron_docker_cmd) || return 1
echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
{
{
eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3
if [ "$?" != "0" ]; then
err "Failed."
return 1
fi
info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
cmdline.spec.gnu recover-target
cmdline.spec::cmd:recover-target:run() {
: :posarg: BACKUP_DIR 'Source directory on backup side'
: :posarg: HOST_DIR 'Target directory on host side'
: :optval: --backup-host,-B "The backup host"
: :optfla: --dry-run,-n "Don't do anything, instead tell what it
would do."
## if no backup host was given, use the default one
backup_host="$opt_backup_host"
if [ -z "$backup_host" ]; then
backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
read -r backup_host ident <<<"$backup_host_ident"
fi
if [[ "$BACKUP_DIR" == /* ]]; then
err "BACKUP_DIR must be a relative path from the root of your backup."
return 1
fi
REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
err "Can't find HOST_DIR '$HOST_DIR'."
return 1
}
export DRY_RUN="${opt_dry_run}"
backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
}
cmdline.spec.gnu odoo
cmdline.spec::cmd:odoo:run() {
:
}
cmdline.spec.gnu restart
cmdline.spec:odoo:cmd:restart:run() {
: :optval: --service,-s "The service (defaults to 'odoo')"
local out odoo_service
odoo_service="${opt_service:-odoo}"
project_name=$(compose:project_name) || return 1
if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
if [[ "$out" == *"no matching entries in passwd file" ]]; then
warn "Caught docker bug. Restarting once more."
if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
echo " output:" >&2
echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
exit 1
fi
else
err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
exit 1
fi
fi
info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
}
cmdline.spec.gnu restore
cmdline.spec:odoo:cmd:restore:run() {
: :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
(can be a local file or a url)'
: :optval: --service,-s "The service (defaults to 'odoo')"
: :optval: --database,-d 'Target database (default if not specified)'
local out
odoo_service="${opt_service:-odoo}"
if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
[[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
settmpdir ZIP_TMP_LOCATION
tmp_location="$ZIP_TMP_LOCATION/dump.zip"
curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
err "Couldn't get '$ZIP_DUMP_LOCATION'."
exit 1
}
if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
err "Download doesn't seem to be a zip file."
dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
exit 1
fi
info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
echo " in '$tmp_location'." >&2
ZIP_DUMP_LOCATION="$tmp_location"
fi
[ -e "$ZIP_DUMP_LOCATION" ] || {
err "No file '$ZIP_DUMP_LOCATION' found." >&2
exit 1
}
#cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
msg_dbname=default
[ -n "$opt_database" ] && msg_dbname="'$opt_database'"
compose --no-hooks drop "$odoo_service" $opt_database || {
err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}."
exit 1
}
compose --no-hooks load "$odoo_service" $opt_database < "$ZIP_DUMP_LOCATION" || {
err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
exit 1
}
info "Successfully restored ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
## Restart odoo, ensure there is no bugs lingering on it.
cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
}
cmdline.spec.gnu dump
cmdline.spec:odoo:cmd:dump:run() {
: :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
: :optval: --database,-d 'Target database (default if not specified)'
: :optval: --service,-s "The service (defaults to 'odoo')"
odoo_service="${opt_service:-odoo}"
msg_dbname=default
[ -n "$opt_database" ] && msg_dbname="'$opt_database'"
compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
exit 1
}
info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
}
cmdline.spec.gnu drop
cmdline.spec:odoo:cmd:drop:run() {
: :optval: --database,-d 'Target database (default if not specified)'
: :optval: --service,-s "The service (defaults to 'odoo')"
odoo_service="${opt_service:-odoo}"
msg_dbname=default
[ -n "$opt_database" ] && msg_dbname="'$opt_database'"
compose --no-hooks drop "$odoo_service" $opt_database || {
err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
exit 1
}
info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
}
cmdline.spec.gnu set-cyclos-url
cmdline.spec:odoo:cmd:set-cyclos-url:run() {
: :optval: --database,-d "Target database ('odoo' if not specified)"
: :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
local URL
dbname=${opt_database:-odoo}
cyclos_service="${opt_service:-cyclos}"
project_name=$(compose:project_name) || exit 1
URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
compose:psql "$project_name" "$dbname" || {
err "Failed to set cyclos url value in '$dbname' database."
exit 1
}
EOF
}
cmdline.spec.gnu cyclos
cmdline.spec::cmd:cyclos:run() {
:
}
cmdline.spec:cyclos:cmd:dump:run() {
: :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
: :optval: --database,-d "Target database ('cyclos' if not specified)"
: :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
cyclos_service="${opt_service:-cyclos}"
cyclos_database="${opt_database:-cyclos}"
project_name=$(compose:project_name) || exit 1
container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker stop "$container_id" || exit 1
Wrap -d "Dump postgres database '${cyclos_database}'." -- \
postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker start "${container_id}" || exit 1
}
cmdline.spec.gnu restore
cmdline.spec:cyclos:cmd:restore:run() {
: :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
(can be a local file or a url)'
: :optval: --service,-s "The service (defaults to 'cyclos')"
: :optval: --database,-d 'Target database (default if not specified)'
local out
cyclos_service="${opt_service:-cyclos}"
cyclos_database="${opt_database:-cyclos}"
project_name=$(compose:project_name) || exit 1
url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
[[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
settmpdir GZ_TMP_LOCATION
tmp_location="$GZ_TMP_LOCATION/dump.gz"
Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
## Note that curl versions before 7.76.0 do not have
curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
> "$tmp_location" || {
echo "Error fetching resource. Is the url correct?" >&2
exit 1
}
if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
err "Download doesn't seem to be a gzip file."
dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
exit 1
fi
EOF
GZ_DUMP_LOCATION="$tmp_location"
fi
[ -e "$GZ_DUMP_LOCATION" ] || {
err "No file '$GZ_DUMP_LOCATION' found." >&2
exit 1
}
Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker stop "$container_id" || exit 1
## XXXvlab: making the assumption that the postgres username should
## be the same as the cyclos service selected (which is the default,
## but not always the case).
Wrap -d "restore postgres database '${cyclos_database}'." -- \
postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
## ensure that the database is not locked
Wrap -d "check and remove database lock if any" -- \
cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
Wrap -d "set root url to '$url'" -- \
cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker start "${container_id}" || exit 1
}
cmdline.spec.gnu set-root-url
cmdline.spec:cyclos:cmd:set-root-url:run() {
: :optval: --database,-d "Target database ('cyclos' if not specified)"
: :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
local URL
cyclos_database=${opt_database:-cyclos}
cyclos_service="${opt_service:-cyclos}"
project_name=$(compose:project_name) || exit 1
url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker stop "$container_id" || exit 1
Wrap -d "set root url to '$url'" -- \
cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker start "${container_id}" || exit 1
}
cmdline.spec.gnu unlock
cmdline.spec:cyclos:cmd:unlock:run() {
: :optval: --database,-d "Target database ('cyclos' if not specified)"
: :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
local URL
cyclos_database=${opt_database:-cyclos}
cyclos_service="${opt_service:-cyclos}"
project_name=$(compose:project_name) || exit 1
container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker stop "$container_id" || exit 1
Wrap -d "check and remove database lock if any" -- \
cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
docker start "${container_id}" || exit 1
}
cmdline::parse "$@"