diff --git a/cron/README.org b/cron/README.org
new file mode 100644
index 0000000..2f68b10
--- /dev/null
+++ b/cron/README.org
@@ -0,0 +1,19 @@
+# -*- ispell-local-dictionary: "english" -*-
+
+* Usage
+
+By adding =cron= as a service, all other services in auto pair mode
+that require a =schedule-command= will use it.
+
+#+begin_src yaml
+cron:
+#+end_src
+
+There are no options to set.
+
+** =schedule-command= relation
+
+Most other services have default options and set these values
+automatically, so you probably don't need to configure anything in the
+relation's options if the defaults suit you.
+
diff --git a/cron/build/Dockerfile b/cron/build/Dockerfile
index 7e7c5d1..ec5f145 100644
--- a/cron/build/Dockerfile
+++ b/cron/build/Dockerfile
@@ -1,13 +1,27 @@
-FROM docker.0k.io/debian:jessie
+FROM alpine:3.18 AS build
 
-RUN apt-get update && \
-    DEBIAN_FRONTEND=noninteractive apt-get install -y cron moreutils && \
-    apt-get clean && \
-    rm -rf /var/lib/apt/lists/*
+ENV DOCKER_VERSION=25.0.2
 
-COPY ./src/usr/bin/lock /usr/bin/lock
-COPY ./src/usr/bin/docker-17.06.2-ce /usr/bin/docker
+RUN apk add --no-cache curl
 
-COPY ./entrypoint.sh /entrypoint.sh
+#RUN curl -L https://download.docker.com/linux/static/stable/x86_64/docker-"$DOCKER_VERSION".tgz | \
+RUN curl -L https://docker.0k.io/downloads/docker-"$DOCKER_VERSION".tgz | \
+    tar -xz -C /tmp/ \
+    && mv /tmp/docker/docker /usr/bin/docker
+RUN curl -L https://docker.0k.io/downloads/lock-40a4b8f > /usr/bin/lock \
+    && chmod +x /usr/bin/lock
 
-ENTRYPOINT [ "/entrypoint.sh" ]
+FROM alpine:3.18
+
+## Used by `lock`
+RUN apk add --no-cache bash
+
+## /usr/bin/dc is a calculator provided by busybox that conflicts with
+## the `dc` command provided by `compose`. We have no need of busybox
+## calculator
+RUN rm /usr/bin/dc
+
+COPY --from=build /usr/bin/docker /usr/bin/docker
+COPY --from=build /usr/bin/lock /usr/bin/lock
+
+ENTRYPOINT [ "crond", "-f", "-l", "0" ]
diff --git a/cron/build/README b/cron/build/README
deleted file mode 100644
index 6779f5f..0000000
--- a/cron/build/README
+++ /dev/null
@@ -1,16 +0,0 @@
-
-
-Warning, this charm will require access to ``/var/run/docker.sock``,
-and this IS EQUIVALENT to root access to host.
-
-Warning, must use ``/etc/cron`` and not ``/etc/cron.d``.
-
-
-docker was downloaded with:
-
-wget https://get.docker.com/builds/Linux/x86_64/docker-1.9.1
-
-
-It changed, check:
-
- https://download.docker.com/linux/static/stable/x86_64/
\ No newline at end of file
diff --git a/cron/build/entrypoint.sh b/cron/build/entrypoint.sh
deleted file mode 100755
index 111cc68..0000000
--- a/cron/build/entrypoint.sh
+++ /dev/null
@@ -1,32 +0,0 @@
-#!/bin/bash
-
-##
-## /var/log might be plugged into an empty volume for saving logs, so we
-## must make sure that /var/log/exim4 exists and has correct permissions.
-
-mkdir -p /var/log/exim4
-chmod -R u+rw /var/log/exim4
-chown -R Debian-exim /var/log/exim4
-
-
-echo "Propagating docker shell environment variables to CRON scripts."
-
-rm -f /etc/cron.d/*
-cp -a /etc/cron/* /etc/cron.d/
-
-for f in /etc/crontab /etc/cron.d/*; do
-    [ -e "$f" ] || continue
-    mv "$f" /tmp/tempfile
-    {
-        declare -xp | egrep '_PORT_[0-9]+_' | sed -r 's/^declare -x //g'
-        echo "TZ=$TZ"
-        echo
-        cat /tmp/tempfile
-    } > "$f"
-    rm /tmp/tempfile
-done
-
-
-echo "Launching cron."
-## Give back PID 1, so that cron process receives signals.
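## (Aside, not part of the removed entrypoint: the exec-as-PID-1 pattern
## noted above carries over to the new image, where busybox crond runs in
## the foreground directly as the ENTRYPOINT. A minimal sketch of the
## pattern:
##
##     #!/bin/sh
##     # ...one-time setup work...
##     exec crond -f -l 0   # replace the shell so crond becomes PID 1
##                          # and receives SIGTERM from `docker stop`
## )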
-exec /usr/sbin/cron -f diff --git a/cron/build/src/usr/bin/README b/cron/build/src/usr/bin/README deleted file mode 100644 index d0f40ba..0000000 --- a/cron/build/src/usr/bin/README +++ /dev/null @@ -1,2 +0,0 @@ -WARNING, lock shell script is a copy from ``kal-scripts``. Please -do not do any modification to it without sending it back to ``kal-scripts``. diff --git a/cron/build/src/usr/bin/docker-1.9.1 b/cron/build/src/usr/bin/docker-1.9.1 deleted file mode 100755 index 8090020..0000000 Binary files a/cron/build/src/usr/bin/docker-1.9.1 and /dev/null differ diff --git a/cron/build/src/usr/bin/docker-17.06.2-ce b/cron/build/src/usr/bin/docker-17.06.2-ce deleted file mode 100755 index de3d01b..0000000 Binary files a/cron/build/src/usr/bin/docker-17.06.2-ce and /dev/null differ diff --git a/cron/build/src/usr/bin/lock b/cron/build/src/usr/bin/lock deleted file mode 100755 index 04d9d8b..0000000 --- a/cron/build/src/usr/bin/lock +++ /dev/null @@ -1,363 +0,0 @@ -#!/bin/bash - -## -## TODO -## - don't sleep 1 but wait in flock for 1 second -## - every waiting proc should write at least their PID and priority, -## to leave alive PID with higher priority the precedence. (and probably -## a check to the last probing time, and invalidate it if it is higher than 10s -## for example.) -## - could add the time they waited in the waiting list, and last probe. -## - should execute "$@", if user needs '-c' it can run ``bash -c ""`` - -exname="$(basename "$0")" -usage="$exname LOCKLABELS [-k] [FLOCK_OPTIONS] -- [CMD...]" - -verb() { [ -z "$verbose" ] || echo "$@" >&2 ; } -err() { echo "$@" >&2; } -die() { echo "$@" >&2; exit 1; } - -md5_compat() { md5sum | cut -c -32; true; } - -LOCKLABELS= -flock_opts=() -command=() -nonblock= -errcode=1 -timeout= -cmd= -priority=1 -remove_duplicate= -while [ "$1" ]; do - case "$1" in - -h|--help) - echo "$help" - exit 0 - ;; - -V|--version) - echo "$version" - exit 0 - ;; - -c) - cmd="$2" - shift - ;; - -p|--priority) - priority=$2 - shift - ;; - -D) - remove_duplicate=true - ;; - -k) - kill=yes - ;; - -n|--nb|--nonblock) - nonblock=true - ;; - -w|--wait|--timeout) - timeout=$2 ## will manage this - shift - ;; - -E|--conflict-exit-code) - errcode=$2 ## will manage this - shift - ;; - -v|--verbose) - verbose=true ## will manage this - ;; - -n|--nb|--nonblock) - nonblock=true ## will manage this - ;; - --) - [ "$cmd" ] && die "'--' and '-c' are mutualy exclusive" - shift - command+=("$@") - break 2 - ;; - *) - [ -z "$LOCKLABELS" ] && { LOCKLABELS=$1 ; shift ; continue ; } - flock_opts+=("$1") - ;; - esac - shift -done - -if [ -z "$LOCKLABELS" ]; then - err "You must provide a lock file as first argument." - err "$usage" - exit 1 -fi - -if [ "$remove_duplicate" ]; then - md5code=$( - if [ "$cmd" ]; then - echo bash -c "$cmd" - else - echo "${command[@]}" - fi | md5_compat) -fi - - -function is_int () { [[ "$1" =~ ^-?[0-9]+$ ]] ; } - -is_pid_alive() { - local pid="$1" - ps --pid "$pid" >/dev/null 2>&1 -} - - -is_pgid_alive() { - local pgid="$1" - [ "$(ps -e -o pgid,pid= | egrep "^ *$pgid ")" ] -} - - -pgid_from_pid() { - local pid="$1" - pgid=$(ps -o pgid= "$pid" 2>/dev/null | egrep -o "[0-9]+") - if ! is_int "$pgid"; then - err "Could not retrieve a valid PGID from PID '$pid' (returned '$pgid')." 
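## (Note on the removed helper above: `ps -o pgid= "$pid"` prints the
## process-group id of $pid without a header, and `egrep -o "[0-9]+"`
## keeps only the digits, so a dead or bogus PID leaves $pgid empty and
## lands in this error branch.)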
- return 1 - fi - echo "$pgid" -} - - -ensure_kill() { - local pid="$1" timeout=5 start=$SECONDS kill_count=0 pgid - pgid=$(pgid_from_pid "$pid") - while is_pid_alive "$pid"; do - if is_pgid_alive "$pgid"; then - if [ "$kill_count" -gt 4 ]; then - err "FATAL: duplicate command, GPID=$pgid has resisted kill procedure. Aborting." - return 1 - elif [ "$kill_count" -gt 2 ]; then - err "duplicate command, PGID wouldn't close itself, force kill PGID: kill -9 -- -$pgid" - kill -9 -- "$pgid" - sleep 1 - else - err "duplicate command, Sending SIGKILL to PGID: kill -- -$pgid" - kill -- -"$pgid" - sleep 1 - fi - ((kill_count++)) - fi - if [ "$((SECONDS - start))" -gt "$timeout" ]; then - err "timeout reached. $pid" - return 1 - fi - done - return 0 -} - - -acquire_pid_file() { - local label=$1 - lockfile="/var/lock/lockcmd-$label.lock" - mkdir -p /var/run/lockcmd - pidfile="/var/run/lockcmd/$label.pid" - export pidfile - ( - verb() { [ -z "$verbose" ] || echo "$exname($label) $pid> $@" >&2 ; } - err() { echo "$exname($label) $pid> $@" >&2; } - - start=$SECONDS - kill_count=0 - pgid_not_alive_count=0 - while true; do - ## ask for lock on $lockfile (fd 200) - if ! flock -n -x 200; then - verb "Couldn't acquire primary lock... (elapsed $((SECONDS - start)))" - else - verb "Acquired lock '$label' on pidfile, inspecting pidfile." - if ! [ -e "$pidfile" ]; then - verb "No pidfile, inscribing my PID" - echo -e "$pid $priority" > "$pidfile" - exit 0 - fi - - if ! content=$(cat "$pidfile" 2>/dev/null); then - err "Can't read $pidfile" - exit 1 - fi - read opid opriority < <(echo "$content" | head -n 1) - opriority=${opriority:-1} - verb "Previous PID is $opid, with priority $opriority" - if ! is_pid_alive "$opid"; then - err "Ignoring stale PID $opid" - echo -e "$pid $priority" > "$pidfile" - exit 0 - else - if [ "$remove_duplicate" ]; then ## Add my pid and md5 if not already there. - same_cmd_pids=$( - echo "$content" | tail -n +1 | \ - egrep "^[0-9]+ $md5code$" 2>/dev/null | \ - cut -f 1 -d " ") - same_pids=() - found_myself= - for spid in $same_cmd_pids; do - if [ "$spid" == "$pid" ]; then - found_myself=true - continue - fi - same_pids+=("$spid") - done - [ "$found_myself" ] || echo "$pid $md5code" >> "$pidfile" - fi - flock -u 200 ## reopen the lock to give a chance to the other process to remove the pidfile. - if [ "$remove_duplicate" ]; then ## Add my pid and md5 if not already there. - for spid in "${same_pids[@]}"; do - if ! ensure_kill "$spid"; then - err "Couldn't kill previous duplicate command." - exit 1 - fi - done - fi - pgid=$(pgid_from_pid "$opid") - verb "PGID of previous PID is $pgid" - if is_pgid_alive "$pgid"; then - verb "Previous PGID is still alive" - if [ "$kill" ] && [ "$priority" -ge "$opriority" ]; then - if [ "$kill_count" -gt 4 ]; then - err "$pid>FATAL: GPID=$pgid has resisted kill procedure. Aborting." - exit 1 - elif [ "$kill_count" -gt 2 ]; then - err "PGID wouldn't close itself, force kill PGID: kill -9 -- -$pgid" >&2 - kill -9 -- "$pgid" - sleep 1 - else - err "Sending SIGKILL to PGID: kill -- -$pgid" >&2 - kill -- -"$pgid" - sleep 1 - fi - ((kill_count++)) - else - if [ "$nonblock" ]; then - verb "Nonblock options forces exit." - exit 1 - else - verb "Couldn't acquire Lock... (elapsed $((SECONDS - start)))" - fi - fi - else - if [ "$pgid_not_alive_count" -gt 4 ]; then - verb "$pid>A lock exists for label $label, but PGID:$pgid in it isn't alive while child $pid is ?!?." - err "$pid>Can't force seizing the lock." 
>&2 - exit 1 - fi - ((pgid_not_alive_count++)) - fi - fi - fi - - if [ "$timeout" ] && [ "$timeout" -lt "$((SECONDS - start))" ]; then - err "Timeout reached (${timeout}s) while waiting for lock on $label" - exit "$errcode" - fi - sleep 1 - done - ) 200> "$lockfile" -} - -remove_pid_file() { - local label=$1 - lockfile="/var/lock/lockcmd-$label.lock" - mkdir -p /var/run/lockcmd - pidfile="/var/run/lockcmd/$label.pid" - -( - verb() { [ -z "$verbose" ] || echo "$exname($label) $pid> $@" >&2 ; } - err() { echo "$exname($label) $pid> $@" >&2; } - verb "Asking lock to delete $pidfile." - timeout=5 - start=$SECONDS - while true; do - ## ask for lock on $lockfile (fd 200) - if ! flock -n -x 200; then - verb "Couldn't acquire primary lock... (elapsed $((SECONDS - start)))" - else - verb "Acquired lock '$label' on pidfile." - if ! [ -e "$pidfile" ]; then - verb "No more pidfile, somebody deleted for us ?1?" - exit 1 - fi - if ! content=$(cat "$pidfile" 2>/dev/null); then - err "Can't read $pidfile" - exit 1 - fi - read opid opriority < <(echo "$content" | head -n 1) - opriority=${opriority:-1} - if [ "$opid" == "$pid" ]; then - verb "Deleted pidfile. Releasing lock." - rm -f "$pidfile" - exit 0 - else - verb "Removing duplicates in pidfile. Releasing lock." - [ "$remove_duplicate" ] && sed -ri "/^$pid $md5code$/d" "$pidfile" - exit 0 - fi - fi - if [ "$timeout" ] && [ "$timeout" -lt "$((SECONDS - start))" ]; then - err "Timeout reached (${timeout}s) while waiting for lock on $label" - exit "$errcode" - fi - sleep 1 - done -) 200> "$lockfile" - -} - - -## appends a command to the signal handler functions -# -# example: trap_add EXIT,INT close_ssh "$ip" -trap_add() { - local sigs="$1" sig cmd old_cmd - shift || { - echo "${FUNCNAME} usage error" >&2 - return 1 - } - cmd="$@" - while IFS="," read -d "," sig; do - prev_cmd="$(trap -p "$sig")" - if [ "$prev_cmd" ]; then - new_cmd="${prev_cmd#trap -- \'}" - new_cmd="${new_cmd%\' "$sig"};$cmd" - else - new_cmd="$cmd" - fi - trap -- "$new_cmd" "$sig" || { - echo "unable to add command '$@' to trap $sig" >&2 ; - return 1 - } - done < <(echo "$sigs,") -} - -remove_all_pid_file() { - while read -d "," label; do - { - remove_pid_file "$label" || err "Could not delete $label" - } & - done < <(echo "$LOCKLABELS,") - wait -} - -## -## Code -## - -pid="$$" - -trap_add EXIT "remove_all_pid_file" -while read -d "," label; do - acquire_pid_file "$label" || exit "$errcode" & -done < <(echo "$LOCKLABELS,") -wait -if [ "$cmd" ]; then - bash -c "$cmd" -else - "${command[@]}" -fi -errlvl="$?" -exit "$?" diff --git a/cron/hooks/init b/cron/hooks/init index 9bb7410..b911813 100755 --- a/cron/hooks/init +++ b/cron/hooks/init @@ -30,6 +30,9 @@ fi exit 1 } +mkdir -p "$SERVICE_CONFIGSTORE/etc/crontabs" +touch "$SERVICE_CONFIGSTORE/etc/crontabs/root" + timezone=$(cat /etc/timezone) || exit 1 init-config-add " $CHARM_NAME: diff --git a/cron/hooks/pre_deploy b/cron/hooks/pre_deploy index 23bc254..7ab8a90 100755 --- a/cron/hooks/pre_deploy +++ b/cron/hooks/pre_deploy @@ -1,20 +1,39 @@ #!/bin/bash -## Should be executable N time in a row with same result. + +. lib/common set -e -cron_config_hash() { - debug "Adding config hash to enable recreating upon config change." 
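## (Note on the removed helper: it hashed every file under the service's
## etc/cron* config directories and exposed the digest as a compose label,
## so that any configuration change produced a new label value and forced
## the container to be recreated. The busybox-crond based replacement below
## instead rewrites etc/crontabs/root and touches cron.update so crond
## rescans its tables.)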
- config_hash=$({ - find "$SERVICE_CONFIGSTORE/etc/cron"{,.hourly,.weekly,.daily,.monthly} \ - -type f -exec md5sum {} \; - } | md5_compat) || exit 1 - init-config-add " -$MASTER_BASE_SERVICE_NAME: - labels: - - compose.config_hash=$config_hash -" +root_crontab="$SERVICE_CONFIGSTORE/etc/crontabs/root" + +cron_content=$(set pipefail; cron:entries | tr '\0' '\n') || { + err "Failed to make cron entries" >&2 + exit 1 } +if [ -z "${cron_content::1}" ]; then + err "Unexpected empty scheduled command list." + exit 1 +fi +if [ -e "$root_crontab" ]; then + if ! [ -f "$root_crontab" ]; then + err "Destination '$root_crontab' exists and is not a file." + exit 1 + fi + current_content=$(cat "$root_crontab") + if [ "$current_content" = "$cron_content" ]; then + info "Cron entry already up to date." + exit 0 + fi +fi + + +if ! [ -d "${root_crontab%/*}" ]; then + mkdir -p "${root_crontab%/*}" +fi +printf "%s\n" "$cron_content" > "$root_crontab" +## Busybox cron uses cron.update file to rescan new cron entries +## cf: https://git.busybox.net/busybox/tree/miscutils/crond.c#n1089 +touch "${root_crontab%/*}/cron.update" -cron_config_hash || exit 1 +info "Cron entry updated ${GREEN}successfully${NORMAL}." \ No newline at end of file diff --git a/cron/lib/common b/cron/lib/common new file mode 100644 index 0000000..4053217 --- /dev/null +++ b/cron/lib/common @@ -0,0 +1,208 @@ +# -*- mode: shell-script -*- + +cron:get_config() { + local cfg="$1" + local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@")" \ + type value + if [ -e "$cache_file" ]; then + #debug "$FUNCNAME: SESSION cache hit $1" + cat "$cache_file" + return 0 + fi + type=$(e "$cfg" | shyaml -q get-type 2>/dev/null) || true + case "$type" in + "sequence") + while read-0-err E s; do + cron:get_config "$s" || return 1 + done < <(e "$cfg" | p-err shyaml -q get-values-0 -y) + if [ "$E" != 0 ]; then + err "Failed to parse sequence while reading config." + return 1 + fi + ;; + "struct") + while read-0-err E k v; do + while read-0-err E1 schedule lock_opts title command; do + if [ -n "$title" ]; then + err "Unexpected label specified in struct." + echo " Using struct, the key will be used as label." >&2 + echo " So you can't specify a label inner value(s)." >&2 + return 1 + fi + p0 "$schedule" "$lock_opts" "$k" "$command" + done < <(p-err cron:get_config "$v") + if [ "$E1" != 0 ]; then + err "Failed to parse value of key '$k' in struct config." + return 1 + fi + done < <(e "$cfg" | p-err shyaml -q key-values-0 -y) + if [ "$E" != 0 ]; then + err "Failed to parse key values while reading config." + return 1 + fi + ;; + "str") + ## examples: + ## (*/5 * * * *) {-k} bash -c "foo bla bla" + ## (@daily) {-p 10 -D} bash -c "foo bla bla" + value=$(e "$cfg" | yaml_get_values) || { + err "Failed to parse str while reading config." + return 1 + } + if ! [[ "$value" =~ ^[[:space:]]*([a-zA-Z0-9_-]+)?[[:space:]]*"("([^\)]+)")"[[:space:]]+\{([^\}]*)\}[[:space:]]*(.*)$ ]]; then + err "Invalid syntax, expected: 'LABEL (SCHEDULE) {LOCK_OPTIONS} COMMAND'." + echo " With LABEL being a possible empty string." >&2 + echo " Received: '$value'" >&2 + return 1 + fi + printf "%s\0" "${BASH_REMATCH[2]}" "${BASH_REMATCH[3]}" "${BASH_REMATCH[1]}" "${BASH_REMATCH[4]}" + ;; + NoneType|"") + : + ;; + *) + value=$(e "$cfg" | yaml_get_interpret) || { + err "Failed to parse value while reading config." + return 1 + } + if [[ "$value" == "$cfg" ]]; then + err "Unrecognized type '$type'." 
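## (Note: reaching this point means yaml_get_interpret returned the input
## unchanged, so recursing on it would loop forever; report the unknown
## type and bail out instead.)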
+ return 1 + fi + cron:get_config "$value" || return 1 + ;; + esac > "$cache_file" + + ## if cache file is empty, this is an error + if [ ! -s "$cache_file" ]; then + err "Unexpected empty relation options." + echo " - check that you don't overwrite default options with an empty relation" >&2 + echo " - check your charm is setting default options" >&2 + echo " Original value: '$cfg'" >&2 + rm -f "$cache_file" + return 1 + fi + + cat "$cache_file" +} + + +cron:entries_from_service() { + local service="$1" relation_cfg="$2" \ + cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@")" \ + label schedule lock_opts title command full_label + if [ -e "$cache_file" ]; then + #debug "$FUNCNAME: SESSION cache hit $1" + cat "$cache_file" + return 0 + fi + + ## XXXvlab; should factorize this with compose-core to setup relation env + export BASE_SERVICE_NAME=$service + MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$service") || return 1 + MASTER_BASE_CHARM_NAME=$(get_service_charm "$MASTER_BASE_SERVICE_NAME") || return 1 + BASE_CHARM_NAME=$(get_service_charm "$service") || return 1 + BASE_CHARM_PATH=$(charm.get_dir "$BASE_CHARM_NAME") || return 1 + export MASTER_BASE_{CHARM,SERVICE}_NAME BASE_CHARM_{PATH,NAME} + + label="launch-$service" + while read-0-err E schedule lock_opts title command; do + lock_opts=($lock_opts) + if ! [[ "$schedule" =~ ^(([0-9/,*-]+[[:space:]]+){4,4}[0-9/,*-]+|@[a-z]+)$ ]]; then + err "Unrecognized schedule '$schedule'." + return 1 + fi + ## Check that label is only a simple identifier + if ! [[ "$title" =~ ^[a-zA-Z0-9_-]*$ ]]; then + err "Unexpected title '$title', please use only alphanumeric, underscore or dashes (can be empty)." + return 1 + fi + if ! lock_opts=($(cron:lock_opts "${lock_opts[@]}")); then + err "Failed to parse lock options." + return 1 + fi + if [ -z "$command" ]; then + err "Unexpected empty command." + return 1 + fi + + full_label="$label" + [ -n "$title" ] && full_label+="-$title" + + ## escape double-quotes + command=${command//\"/\\\"} + + p0 "$schedule lock ${full_label} ${lock_opts[*]} -c \"$command\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/${full_label}_script.log" + + done < <(p-err cron:get_config "$relation_cfg") > "$cache_file" + if [ "$E" != 0 ]; then + rm -f "$cache_file" + err "Failed to get ${DARKYELLOW}$service${NORMAL}--${DARKBLUE}schedule-command${NORMAL}-->${DARKYELLOW}$SERVICE_NAME${NORMAL}'s config." + return 1 + fi + + cat "$cache_file" +} + + +cron:lock_opts() { + local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@")" \ + label schedule lock_opts title command full_label + if [ -e "$cache_file" ]; then + #debug "$FUNCNAME: SESSION cache hit $1" + cat "$cache_file" + return 0 + fi + lock_opts=() + while [ "$1" ]; do + case "$1" in + "-D"|"-k") + lock_opts+=("$1") + ;; + "-p") + ## check that the value is a number + if ! [[ "$2" =~ ^[0-9]+$ ]]; then + err "Unexpected value for priority '$2' (expected an integer)." + return 1 + fi + lock_opts+=(-p "$2") + shift + ;; + "-*"|"--*") + err "Unrecognized lock option '$1'." + return 1 + ;; + *) + err "Unexpected lock argument '$1'." 
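## (Note: only `-D`, `-k` and `-p <integer>` are accepted; anything else
## ends up here. On success the options are re-emitted one per line, e.g.
## `cron:lock_opts -p 10 -k -D` prints "-p", "10", "-k" and "-D", which
## the caller reads back into an array.)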
+ return 1 + ;; + esac + shift + done + printf "%s\n" "${lock_opts[@]}" > "$cache_file" + + cat "$cache_file" +} + + +cron:entries() { + local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$SERVICE_NAME" "$ALL_RELATIONS")" \ + s rn ts rc td + if [ -e "$cache_file" ]; then + #debug "$FUNCNAME: SESSION cache hit $1" + cat "$cache_file" + return 0 + fi + + if [ -z "$ALL_RELATIONS" ]; then + err "Expected \$ALL_RELATIONS to be set." + exit 1 + fi + export TARGET_SERVICE_NAME=$SERVICE_NAME + while read-0 service relation_cfg; do + debug "service: '$service' relation_cfg: '$relation_cfg'" + cron:entries_from_service "$service" "$relation_cfg" || return 1 + done < <(get_service_incoming_relations "$SERVICE_NAME" "schedule-command") > "$cache_file" + cat "$cache_file" +} +export -f cron:entries \ No newline at end of file diff --git a/cron/metadata.yml b/cron/metadata.yml index 131149a..f0dd998 100644 --- a/cron/metadata.yml +++ b/cron/metadata.yml @@ -1,11 +1,6 @@ description: Cron daemon config-resources: - - /etc/cron - - /etc/cron.daily - - /etc/cron.weekly - - /etc/cron.hourly - - /etc/cron.monthly - - /usr/local/bin + - /etc/crontabs data-resources: - /var/log/cron host-resources: @@ -20,7 +15,7 @@ uses: ## optional log-rotate: #constraint: required | recommended | optional #auto: pair | summon | none ## default: pair - constraint: optional + constraint: recommended solves: disk-leak: "/var/log/cron" #default-options: diff --git a/cron/test/entries_from_service b/cron/test/entries_from_service new file mode 100644 index 0000000..8902b96 --- /dev/null +++ b/cron/test/entries_from_service @@ -0,0 +1,155 @@ +#!/bin/bash + +exname=$(basename $0) + +compose_core=$(which compose-core) || { + echo "Requires compose-core executable to be in \$PATH." >&2 + exit 1 +} + +fetch-def() { + local path="$1" fname="$2" + ( . "$path" 1>&2 || { + echo "Failed to load '$path'." >&2 + exit 1 + } + declare -f "$fname" + ) +} + +prefix_cmd=" +. /etc/shlib + +include common +include parse + +. ../lib/common + +$(fetch-def "$compose_core" yaml_get_values) +$(fetch-def "$compose_core" yaml_get_interpret) +$(fetch-def "$compose_core" read-0-err) +$(fetch-def "$compose_core" p-err) +$(fetch-def "$compose_core" expand_vars) + +SERVICE_NAME='bar' + +" || { + echo "Couldn't build prefix cmd" >&2 + exit 1 +} + +# mock +cfg-get-value() { + local key="$1" + shyaml get-value "$key" 2>/dev/null +} +export -f cfg-get-value + +yaml_get_interpret() { + shyaml get-value +} +export -f yaml_get_interpret + +get_top_master_service_for_service() { + local service="$1" + echo "$service" +} +export -f get_top_master_service_for_service + +get_service_charm() { + local service="$1" + echo "$service" +} +export -f get_service_charm + +export CACHEDIR=$(mktemp -d -t tmp.XXXXXXXXXX) +export state_tmpdir=$(mktemp -d -t tmp.XXXXXXXXXX) +trap "rm -rf \"$state_tmpdir\"" EXIT +trap "rm -rf \"$CACHEDIR\"" EXIT + +## +## Tests +## + +try " +cron:entries_from_service 'foo' ''" +is errlvl 1 +is err reg "Error:.*ailed to get.*." +is err reg "Error:.*empty.*." +is out '' TRIM + +try " +cron:entries_from_service 'foo' ' +(0 0 * * *) {XX} dc run --rm foo +'" "wrong lock args" +is errlvl 1 +is err reg "Error:.*lock argument.*." +is err reg "Error:.*parse lock.*." 
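## (Both stderr checks above are expected to match: cron:lock_opts reports
## the bad `{XX}` token itself, and cron:entries_from_service wraps it in
## its own "Failed to parse lock options." error.)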
+is out '' TRIM + + +try " +cron:entries_from_service 'foo' ' +(0 0 * * * *) {} dc run --rm foo +'" "wrong schedule" +is errlvl 1 +is err reg "Error:.*schedule.*" +is out '' TRIM + + +try " +cron:entries_from_service 'foo' ' +(0 0 * * *) {} +'" "wrong command" +is errlvl 1 +is err reg "Error:.*empty command.*" +is out '' TRIM + + +try " +set pipefail && +cron:entries_from_service 'foo' ' +(0 0 * * *) {-p 10 -k} dc run --rm foo +' | tr '\0' '\n'" "one command no label" +noerror +is out "\ +0 0 * * * lock launch-foo -p 10 -k -c \"dc run --rm foo\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/launch-foo_script.log\ +" TRIM + + +try " +set pipefail && +cron:entries_from_service 'foo' ' +wiz: (0 0 * * *) {-p 10 -k} dc run --rm foo +' | tr '\0' '\n'" "one command with label" +noerror +is out "\ +0 0 * * * lock launch-foo-wiz -p 10 -k -c \"dc run --rm foo\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/launch-foo-wiz_script.log\ +" TRIM + + + +try " +set pipefail && +cron:entries_from_service 'foo' ' +wiz: (0 0 * * *) {-p 10 -k} dc run --rm foo +bam: (@daily) {-p 10 -D -k} dc run --rm foo --hop + +' | tr '\0' '\n'" "multi command with label" +noerror +is out "\ +0 0 * * * lock launch-foo-wiz -p 10 -k -c \"dc run --rm foo\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/launch-foo-wiz_script.log +@daily lock launch-foo-bam -p 10 -D -k -c \"dc run --rm foo --hop\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/launch-foo-bam_script.log\ +" TRIM + + + +try " +set pipefail && +cron:entries_from_service 'foo' '!var-expand +(0 0 * * *) {-p 10 -k} dc run --rm \$BASE_SERVICE_NAME \$MASTER_BASE_SERVICE_NAME +' | tr '\0' '\n'" "using relation's var" +noerror +is out "\ +0 0 * * * lock launch-foo -p 10 -k -c \"dc run --rm foo foo\" 2>&1 | awk '{ print strftime(\"%Y-%m-%d %H:%M:%S %Z\"), \$0; fflush(); }' >> /var/log/cron/launch-foo_script.log" TRIM + diff --git a/cron/test/get_config b/cron/test/get_config new file mode 100644 index 0000000..2799f59 --- /dev/null +++ b/cron/test/get_config @@ -0,0 +1,151 @@ +#!/bin/bash + +exname=$(basename $0) + +compose_core=$(which compose-core) || { + echo "Requires compose-core executable to be in \$PATH." >&2 + exit 1 +} + +fetch-def() { + local path="$1" fname="$2" + ( . "$path" 1>&2 || { + echo "Failed to load '$path'." >&2 + exit 1 + } + declare -f "$fname" + ) +} + +prefix_cmd=" +. /etc/shlib + +include common +include parse + +. 
../lib/common + +$(fetch-def "$compose_core" yaml_get_values) +$(fetch-def "$compose_core" yaml_get_interpret) +$(fetch-def "$compose_core" read-0-err) +$(fetch-def "$compose_core" p-err) +$(fetch-def "$compose_core" expand_vars) + +" || { + echo "Couldn't build prefix cmd" >&2 + exit 1 +} + +# mock +cfg-get-value() { + local key="$1" + shyaml get-value "$key" 2>/dev/null +} +export -f cfg-get-value + +yaml_get_interpret() { + shyaml get-value +} +export -f yaml_get_interpret + + +export CACHEDIR=$(mktemp -d -t tmp.XXXXXXXXXX) +export state_tmpdir=$(mktemp -d -t tmp.XXXXXXXXXX) +trap "rm -rf \"$state_tmpdir\"" EXIT +trap "rm -rf \"$CACHEDIR\"" EXIT + +## +## Tests +## + +try " +cron:get_config ''" +is errlvl 1 +is err reg 'Error: .*empty.*' +is out '' + +try " +cron:get_config 'xxx'" +is errlvl 1 +is err reg 'Error: .*syntax.*' +is out '' + + +try " +set pipefail && +cron:get_config '(@daily) {} /bin/true' | tr '\0' ':' +" "str simple example without label" +noerror +is out "@daily:::/bin/true:" + +try " +set pipefail && +cron:get_config 'foo (@daily) {} /bin/true' | tr '\0' ':' +" "str simple example with label" +noerror +is out "@daily::foo:/bin/true:" + +try " +set pipefail && +cron:get_config 'foo (@daily) {-p 10 -D} /bin/true' | tr '\0' ':' +" "str simple example with lock options" +noerror +is out "@daily:-p 10 -D:foo:/bin/true:" + +try " +set pipefail && +cron:get_config 'foo (*/2 * * * *) {-p 10 -D} /bin/true' | tr '\0' ':' +" "str simple example with all fields" +noerror +is out "*/2 * * * *:-p 10 -D:foo:/bin/true:" + + +try " +set pipefail && +cron:get_config '- foo (*/2 * * * *) {-p 10 -D} /bin/true' | tr '\0' ':' +" "list 1 elt with str simple example with all fields" +noerror +is out "*/2 * * * *:-p 10 -D:foo:/bin/true:" + +try " +set pipefail && +cron:get_config ' +- foo (*/2 * * * *) {-p 10 -D} /bin/true +- bar (*/3 * * * *) {-p 10 -D -k} /bin/false + +' | tr '\0' ':' +" "list 2 elts with str simple example with all fields" +noerror +is out "*/2 * * * *:-p 10 -D:foo:/bin/true:*/3 * * * *:-p 10 -D -k:bar:/bin/false:" + +try " +set pipefail && +cron:get_config ' +foo: (*/2 * * * *) {-p 10 -D} /bin/true +bar: (*/3 * * * *) {-p 10 -D -k} /bin/false + +' | tr '\0' ':' +" "struct 2 elts with str simple example with all fields" +noerror +is out "*/2 * * * *:-p 10 -D:foo:/bin/true:*/3 * * * *:-p 10 -D -k:bar:/bin/false:" + + + +try " +cron:get_config '!!float 3.7' +" "bad type" +is errlvl 1 +is err reg 'Error: .*type.*' +is out '' + + +try " +export FOO=bar +set pipefail && +cron:get_config '!var-expand (*/2 * * * *) {-p 10 -D} \"/bin/\${FOO}\"' | tr '\0' ':' +" "var-expand" +is errlvl 0 +is err '' +is out '*/2 * * * *:-p 10 -D::"/bin/bar":' + + diff --git a/cron/test/lock_opts b/cron/test/lock_opts new file mode 100644 index 0000000..4638a1e --- /dev/null +++ b/cron/test/lock_opts @@ -0,0 +1,90 @@ +#!/bin/bash + +exname=$(basename $0) + +compose_core=$(which compose-core) || { + echo "Requires compose-core executable to be in \$PATH." >&2 + exit 1 +} + +fetch-def() { + local path="$1" fname="$2" + ( . "$path" 1>&2 || { + echo "Failed to load '$path'." >&2 + exit 1 + } + declare -f "$fname" + ) +} + +prefix_cmd=" +. /etc/shlib + +include common +include parse + +. 
../lib/common + +$(fetch-def "$compose_core" yaml_get_values) +$(fetch-def "$compose_core" yaml_get_interpret) +$(fetch-def "$compose_core" read-0-err) +$(fetch-def "$compose_core" p-err) + +" || { + echo "Couldn't build prefix cmd" >&2 + exit 1 +} + +# mock +cfg-get-value() { + local key="$1" + shyaml get-value "$key" 2>/dev/null +} +export -f cfg-get-value + +yaml_get_interpret() { + shyaml get-value +} +export -f yaml_get_interpret + + +export CACHEDIR=$(mktemp -d -t tmp.XXXXXXXXXX) +export state_tmpdir=$(mktemp -d -t tmp.XXXXXXXXXX) +trap "rm -rf \"$state_tmpdir\"" EXIT +trap "rm -rf \"$CACHEDIR\"" EXIT + +## +## Tests +## + +try " +cron:lock_opts ''" +noerror +is out '' TRIM + +try " +cron:lock_opts '--XXX' +" +is errlvl 1 +is err reg 'Error: .*argument.*--XXX.*' +is out '' + +try " +cron:lock_opts -p X +" +is errlvl 1 +is err reg 'Error: .*priority.*X.*integer.*' +is out '' + + +try " +cron:lock_opts -p 10 -k -D +" +noerror +is out "\ +-p +10 +-k +-D" TRIM + + diff --git a/gogocarto/hooks/schedule_commands-relation-joined b/gogocarto/hooks/schedule_commands-relation-joined deleted file mode 100755 index 3dad3c9..0000000 --- a/gogocarto/hooks/schedule_commands-relation-joined +++ /dev/null @@ -1,44 +0,0 @@ -#!/bin/bash - -## When writing relation script, remember: -## - they should be idempotents -## - they can be launched while the dockers is already up -## - they are launched from the host -## - the target of the link is launched first, and get a chance to ``relation-set`` -## - both side of the scripts get to use ``relation-get``. - -. lib/common - -set -e - -## XXXvlab: should use container name here so that it could support -## multiple postgres -label=${SERVICE_NAME} -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label - -## XXXvlab: Should we do a 'docker exec' instead ? -bin_console="dc run -u www-data --rm --entrypoint \\\"$GOGOCARTO_DIR/bin/console\\\" $MASTER_BASE_SERVICE_NAME" - -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. -file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkvote_script.log - -@daily root lock ${label}-checkExternalSourceToUpdate -D -p 10 -c "\ - $bin_console app:elements:checkExternalSourceToUpdate" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkExternalSourceToUpdate_script.log - -@daily root lock ${label}-notify-moderation -D -p 10 -c "\ - $bin_console app:notify-moderation" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-notify-moderation_script.log - - -@hourly root lock ${label}-sendNewsletter -D -p 10 -c "\ - $bin_console app:users:sendNewsletter" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-sendNewsletter_script.log - - -*/5 * * * * root lock ${label}-webhooks-post -D -p 10 -c "\ - $bin_console --env=prod app:webhooks:post" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-webhooks-post_script.log - - -EOF -chmod +x "$DST" diff --git a/gogocarto/lib/common b/gogocarto/lib/common index 79202c6..b2bcd6a 100644 --- a/gogocarto/lib/common +++ b/gogocarto/lib/common @@ -128,7 +128,7 @@ symphony() { export COMPOSE_IGNORE_ORPHANS=true ## We don't want post deploy that is doing the final http initialization. 
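## (Presumably --no-pre-deploy is added below for the same reason: console
## one-shots should not re-run pre-deploy hooks, such as the cron charm's
## new crontab generation, on every invocation. This is an inference from
## the changeset.)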
- compose --debug -q --no-init --no-post-deploy \ + compose --debug -q --no-init --no-post-deploy --no-pre-deploy \ --without-relation="$SERVICE_NAME":web-proxy \ run \ "${symphony_docker_run_opts[@]}" \ diff --git a/gogocarto/metadata.yml b/gogocarto/metadata.yml index bb163b3..48016c3 100644 --- a/gogocarto/metadata.yml +++ b/gogocarto/metadata.yml @@ -29,8 +29,20 @@ uses: auto: summon solves: database: "main storage" - schedule-commands: + schedule-command: constraint: recommended auto: pair solves: maintenance: "Production scheduled tasks" + default-options: !bash-stdout | + + bin_console="dc exec -T -u www-data $MASTER_BASE_SERVICE_NAME \"/opt/apps/$BASE_SERVICE_NAME/bin/console\"" + + scheds=( + checkvote @daily "$bin_console app:elements:checkvote" + checkExternalSourceToUpdate @daily "$bin_console app:elements:checkExternalSourceToUpdate" + notify-moderation @daily "$bin_console app:notify-moderation" + sendNewsletter @hourly "$bin_console app:users:sendNewsletter" + webhooks-post "*/5 * * * *" "$bin_console --env=prod app:webhooks:post" + ) + printf "%s: (%s) {-D -p 10} %s\n" "${scheds[@]}" \ No newline at end of file diff --git a/letsencrypt/hooks/schedule_command-relation-joined b/letsencrypt/hooks/schedule_command-relation-joined deleted file mode 100755 index ddbd0e4..0000000 --- a/letsencrypt/hooks/schedule_command-relation-joined +++ /dev/null @@ -1,34 +0,0 @@ -#!/bin/bash - -## When writing relation script, remember: -## - they should be idempotents -## - they can be launched while the dockers is already up -## - they are launched from the host -## - the target of the link is launched first, and get a chance to ``relation-set`` -## - both side of the scripts get to use ``relation-get``. - -set -e - -label=${SERVICE_NAME}-renew -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label -LOCAL_LOG=/var/log/cron/${label}_script.log -schedule=$(relation-get schedule) - -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." - exit 1 -fi - -## Warning: using '\' in heredoc will be removed in the final cron file, which -## is totally wanted: cron does not support multilines. - -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. -file_put "$DST" <&1 | ts '\%F \%T \%Z' >> $LOCAL_LOG - -EOF -chmod +x "$DST" diff --git a/letsencrypt/lib/common b/letsencrypt/lib/common index 24fbe70..dd525ce 100644 --- a/letsencrypt/lib/common +++ b/letsencrypt/lib/common @@ -140,14 +140,14 @@ has_existing_cert() { letsencrypt_cert_info() { local domain="$1" - compose -q --no-init --no-relations run -T --rm "$SERVICE_NAME" \ + compose -q --no-hooks run -T --rm "$SERVICE_NAME" \ crt info "$domain" } letsencrypt_cert_delete() { local domain="$1" - compose --debug --no-init --no-relations run --rm "$SERVICE_NAME" \ + compose --debug --no-hooks run --rm "$SERVICE_NAME" \ certbot delete --cert-name "$domain" } @@ -159,7 +159,7 @@ valid_existing_cert() { has_existing_cert "$domain" || return 1 info "Querying $domain for previous info..." 
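## (The `|| return 1` added below matters because the exit status of
## `out=$(cmd)` is the exit status of `cmd`; without the guard, a failing
## `crt info` would leave $out empty and the function would go on parsing
## it as YAML instead of simply returning failure.)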
- out=$(letsencrypt_cert_info "$domain") + out=$(letsencrypt_cert_info "$domain") || return 1 ## check if output is valid yaml err=$(e "$out" | shyaml get-value 2>&1 >/dev/null) || { @@ -201,7 +201,7 @@ valid_existing_cert() { get_domain_list() { - compose -q --no-init --no-relations run --rm "$SERVICE_NAME" crt list + compose -q --no-hooks run --rm "$SERVICE_NAME" crt list } diff --git a/letsencrypt/metadata.yml b/letsencrypt/metadata.yml index 6ede7e4..cd17a28 100644 --- a/letsencrypt/metadata.yml +++ b/letsencrypt/metadata.yml @@ -26,5 +26,5 @@ uses: auto: summon solves: missing-feature: "Automatic certificate renewal" - default-options: - schedule: "30 3 * * 7" ## schedule log renewal every week + default-options: !var-expand + (30 3 * * 7) {-D -p 10} compose crt "$BASE_SERVICE_NAME" renew diff --git a/logrotate/hooks/schedule_command-relation-joined b/logrotate/hooks/schedule_command-relation-joined deleted file mode 100755 index f9950e7..0000000 --- a/logrotate/hooks/schedule_command-relation-joined +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash - -## When writing relation script, remember: -## - they should be idempotents -## - they can be launched while the dockers is already up -## - they are launched from the host -## - the target of the link is launched first, and get a chance to ``relation-set`` -## - both side of the scripts get to use ``relation-get``. - -set -e - -label=launch-$SERVICE_NAME -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label -schedule=$(relation-get schedule) || true - -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." - exit 1 -fi - -## Warning: using '\' in heredoc will be removed in the final cron file, which -## is totally wanted: cron does not support multilines. - -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. -file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log -EOF -chmod +x "$DST" diff --git a/logrotate/metadata.yml b/logrotate/metadata.yml index 64469ec..d00f703 100644 --- a/logrotate/metadata.yml +++ b/logrotate/metadata.yml @@ -14,5 +14,6 @@ uses: auto: summon solves: missing-feature: "scheduling of log rotation" - default-options: - schedule: "0 0 * * *" ## It should really stay at midnight as most logs are dated \ No newline at end of file + default-options: !var-expand + ## It should really stay at midnight as most logs are dated + (0 0 * * *) {-p 10 -D} dc run --rm "$MASTER_BASE_SERVICE_NAME" \ No newline at end of file diff --git a/mariadb/hooks/schedule_command-relation-joined b/mariadb/hooks/schedule_command-relation-joined index 1fe8db0..640d273 100755 --- a/mariadb/hooks/schedule_command-relation-joined +++ b/mariadb/hooks/schedule_command-relation-joined @@ -6,38 +6,54 @@ ## - they are launched from the host ## - the target of the link is launched first, and get a chance to ``relation-set`` ## - both side of the scripts get to use ``relation-get``. +## - use actions of other side for other side's business logic -. lib/common +if [ -z "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: relation does not seems to be correctly setup." + exit 1 +fi +if ! [ -r "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: can't read relation's data." 
>&2 + exit 1 +fi -set -e -## XXXvlab: should use container name here so that it could support -## multiple mysql -label=${SERVICE_NAME}-mysql-backup -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label -schedule=$(relation-get schedule) +if [ "$(cat "$RELATION_DATA_FILE" | shyaml get-type)" == "str" ]; then + ## Cached version already there -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." - exit 1 + ## Note that we rely on the fact that when the relations + ## options are changed in the `compose.yml` file, the relation + ## data file is recreated from the values of the + ## `compose.yml`. + info "Previous relation data is still valid." + exit 0 fi -## Warning: using '\' in heredoc will be removed in the final cron file, which -## is totally wanted: cron does not support multilines. -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. -file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log -EOF -chmod +x "$DST" + "$DOCKER_BASE_IMAGE" --host "${SERVICE_NAME}" +) + +quoted_command=() +for arg in "${command[@]}"; do + quoted_command+=("$(printf "%q" "$arg")") +done + +printf "(%s) {%s} %s\n" \ + "$schedule" \ + "${lock_opts[*]}" \ + "${quoted_command[*]}" > "$RELATION_DATA_FILE" diff --git a/mongo/hooks/schedule_command-relation-joined b/mongo/hooks/schedule_command-relation-joined index eaf0374..3078cc3 100755 --- a/mongo/hooks/schedule_command-relation-joined +++ b/mongo/hooks/schedule_command-relation-joined @@ -6,38 +6,59 @@ ## - they are launched from the host ## - the target of the link is launched first, and get a chance to ``relation-set`` ## - both side of the scripts get to use ``relation-get``. +## - use actions of other side for other side's business logic . lib/common -set -e -label=${SERVICE_NAME}-backup -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label -schedule=$(relation-get schedule) +if [ -z "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: relation does not seems to be correctly setup." + exit 1 +fi -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." +if ! [ -r "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: can't read relation's data." >&2 exit 1 fi + +if [ "$(cat "$RELATION_DATA_FILE" | shyaml get-type)" == "str" ]; then + ## Cached version already there + + ## Note that we rely on the fact that when the relations + ## options are changed in the `compose.yml` file, the relation + ## data file is recreated from the values of the + ## `compose.yml`. + info "Previous relation data is still valid." + exit 0 +fi + +schedule=$(relation-get schedule) || true exclude_dbs=$(relation-get exclude-dbs 2>/dev/null) || true exclude_dbs=$(echo "$exclude_dbs" | shyaml get-values 2>/dev/null | nspc) || true -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. 
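## (This warning still applies to the replacement code: the cron container
## drives the HOST docker daemon through /var/run/docker.sock, as the
## removed build/README pointed out, so every `-v SRC:DST` in the generated
## `docker run` commands is resolved against the host filesystem. That is
## why the new relation data mounts host-side paths such as
## "$HOST_CHARM_STORE/..." and "$SERVICE_DATASTORE/...".)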
-file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log -EOF -chmod +x "$DST" +lock_opts=(-D -p 10) + + +command=( + docker run --rm + -u 0 + -e MONGO_HOST=${SERVICE_NAME} + -e exclude_dbs="$exclude_dbs" + --network ${PROJECT_NAME}_default + -v "$HOST_CHARM_STORE/${CHARM_REL_PATH#${CHARM_STORE}/}/resources/bin/mongo-backup:/usr/sbin/mongo-backup" + -v "$SERVICE_DATASTORE/var/backups/mongo:/var/backups/mongo" + --entrypoint mongo-backup + "$DOCKER_BASE_IMAGE" +) + +quoted_command=() +for arg in "${command[@]}"; do + quoted_command+=("$(printf "%q" "$arg")") +done + +printf "(%s) {%s} %s\n" \ + "$schedule" \ + "${lock_opts[*]}" \ + "${quoted_command[*]}" > "$RELATION_DATA_FILE" diff --git a/postgres/hooks/schedule_command-relation-joined b/postgres/hooks/schedule_command-relation-joined index 5312983..4905a21 100755 --- a/postgres/hooks/schedule_command-relation-joined +++ b/postgres/hooks/schedule_command-relation-joined @@ -6,40 +6,67 @@ ## - they are launched from the host ## - the target of the link is launched first, and get a chance to ``relation-set`` ## - both side of the scripts get to use ``relation-get``. +## - use actions of other side for other side's business logic . lib/common -set -e -## XXXvlab: should use container name here so that it could support -## multiple postgres -label=${SERVICE_NAME}-pg-backup -DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label -schedule=$(relation-get schedule) +## +## This script has to replace `exclude-dbs` options to match definition +## from `schedule-command` interface that is awaited by the target side +## in the `pre_deploy` script. +## -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." +if [ -z "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: relation does not seems to be correctly setup." exit 1 fi +if ! [ -r "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: can't read relation's data." >&2 + exit 1 +fi + +if [ "$(cat "$RELATION_DATA_FILE" | shyaml get-type)" == "str" ]; then + ## Cached version already there + + ## Note that we rely on the fact that when the relations + ## options are changed in the `compose.yml` file, the relation + ## data file is recreated from the values of the + ## `compose.yml`. + info "Previous relation data is still valid." + exit 0 +fi + +schedule=$(relation-get schedule) || true exclude_dbs=$(relation-get exclude-dbs 2>/dev/null) || true exclude_dbs=$(echo "$exclude_dbs" | shyaml get-values 2>/dev/null | nspc) || true -## Warning: 'docker -v' will use HOST directory even if launched from -## 'cron' container. 
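## (Note on the replacement code below: the command is built as a bash
## array and each element is re-quoted with `printf "%q"` before being
## flattened into the single-line `(SCHEDULE) {LOCK_OPTS} COMMAND` relation
## value, so arguments containing spaces survive the round trip, e.g.
##     printf "%q" 'a b'    # prints: a\ b
## )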
-file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/pg-backup_script.log -EOF -chmod +x "$DST" + +lock_opts=(-D -p 10) + + +command=( + docker run --rm + -e PGHOST=${SERVICE_NAME} + --network ${PROJECT_NAME}_default + -e exclude_dbs="$exclude_dbs" + -v "$LOCAL_DB_PASSFILE":/root/.pgpass + -v "$HOST_CHARM_STORE/${CHARM_REL_PATH#${CHARM_STORE}/}/resources/bin/pg-backup:/usr/sbin/pg-backup" + -v "$SERVICE_DATASTORE/var/backups/pg:/var/backups/pg" + --entrypoint pg-backup + "$DOCKER_BASE_IMAGE" +) + +quoted_command=() +for arg in "${command[@]}"; do + quoted_command+=("$(printf "%q" "$arg")") +done + +printf "(%s) {%s} %s\n" \ + "$schedule" \ + "${lock_opts[*]}" \ + "${quoted_command[*]}" > "$RELATION_DATA_FILE" + echo "data: '$(cat "$RELATION_DATA_FILE")'" >&2 + diff --git a/rsync-backup/hooks/schedule_command-relation-joined b/rsync-backup/hooks/schedule_command-relation-joined index 0c87060..4bc0e6f 100755 --- a/rsync-backup/hooks/schedule_command-relation-joined +++ b/rsync-backup/hooks/schedule_command-relation-joined @@ -6,18 +6,33 @@ ## - they are launched from the host ## - the target of the link is launched first, and get a chance to ``relation-set`` ## - both side of the scripts get to use ``relation-get``. +## - use actions of other side for other side's business logic . lib/common -set -e - -schedule=$(relation-get schedule) +if [ -z "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: relation does not seems to be correctly setup." + exit 1 +fi -if ! echo "$schedule" | egrep '^\s*(([0-9/,*-]+\s+){4,4}[0-9/,*-]+|@[a-z]+)\s*$' >/dev/null 2>&1; then - err "Unrecognized schedule '$schedule'." +if ! [ -r "$RELATION_DATA_FILE" ]; then + err "$FUNCNAME: can't read relation's data." >&2 exit 1 fi +if [ "$(cat "$RELATION_DATA_FILE" | shyaml get-type)" == "str" ]; then + ## Cached version already there + + ## Note that we rely on the fact that when the relations + ## options are changed in the `compose.yml` file, the relation + ## data file is recreated from the values of the + ## `compose.yml`. + info "Previous relation data is still valid." + exit 0 +fi + +set -e + private_key=$(options-get private-key) || exit 1 target=$(options-get target) || exit 1 ident=$(options-get ident) || exit 1 @@ -29,21 +44,29 @@ host_path_key="$SERVICE_CONFIGSTORE${local_path_key}" echo "$private_key" | file_put "$host_path_key/id_rsa" chmod 600 "$host_path_key/id_rsa" -label="${SERVICE_NAME}" -DST=$CONFIGSTORE/$TARGET_CHARM_NAME/etc/cron/$label - -## Warning: using '\' in heredoc will be removed in the final cron file, which -## is totally wanted: cron does not support multilines. 
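## (The constraint mentioned above is also why the new relation data is
## emitted with a single printf per entry: a crontab entry must fit on one
## line, so the whole docker invocation is collapsed into one quoted
## string.)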
-file_put "$DST" <&1 | ts '\%F \%T' >> /var/log/cron/${label}_script.log -EOF -chmod +x "$DST" +schedule=$(relation-get schedule) || true + +lock_opts=(-D -p 10 -k) + +command=( + docker run --rm + -e LABEL_HOSTNAME="$ident" + -v "$RSYNC_CONFIG_DIR:/etc/rsync" + -v "$host_path_key:$local_path_key" + -v "$HOST_DATASTORE:/mnt/source" + -v "$HOST_COMPOSE_YML_FILE:/mnt/source/compose.yml" + --network ${PROJECT_NAME}_default + "$DOCKER_BASE_IMAGE" + /mnt/source "$target" +) + +quoted_command=() +for arg in "${command[@]}"; do + quoted_command+=("$(printf "%q" "$arg")") +done + +printf "(%s) {%s} %s\n" \ + "$schedule" \ + "${lock_opts[*]}" \ + "${quoted_command[*]}" > "$RELATION_DATA_FILE" + diff --git a/sftp/lib/common b/sftp/lib/common index ee96789..e10331b 100644 --- a/sftp/lib/common +++ b/sftp/lib/common @@ -127,7 +127,7 @@ make_build_script() { while read-0 key; do keys+="$key"$'\n' - done < <(echo "$user_def" | shyaml get-values-0 -q keys) + done < <(echo "$user_def" | shyaml -q get-values-0 keys) if [ "$keys" ]; then code+="mkdir -p \"/home/$user/.ssh\""$'\n' code+="cat < /home/$user/.ssh/authorized_keys"$'\n'
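## (End-to-end illustration, assembled from the cron test expectations
## above: a `schedule-command` relation value such as
##     wiz: (0 0 * * *) {-p 10 -k} dc run --rm foo
## coming from a service named `foo` ends up in the cron service's
## $SERVICE_CONFIGSTORE/etc/crontabs/root as
##     0 0 * * * lock launch-foo-wiz -p 10 -k -c "dc run --rm foo" 2>&1 | awk '{ print strftime("%Y-%m-%d %H:%M:%S %Z"), $0; fflush(); }' >> /var/log/cron/launch-foo-wiz_script.log
## )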