Compare commits

...

3 Commits

  1. 6
      bind/hooks/log_rotate-relation-joined
  2. 17
      logrotate/build/Dockerfile
  3. 7
      logrotate/build/src/entrypoint.sh
  4. 5
      odoo-tecnativa/hooks/init
  5. 6
      odoo-tecnativa/hooks/log_rotate-relation-joined
  6. 4
      odoo-tecnativa/hooks/postgres_database-relation-joined
  7. 10
      odoo-tecnativa/lib/common
  8. 17
      rsync-backup-target/build/Dockerfile
  9. 108
      rsync-backup-target/build/src/usr/local/sbin/import-log-chunks
  10. 75
      rsync-backup-target/hooks/init
  11. 24
      rsync-backup-target/hooks/log_rotate-relation-joined
  12. 8
      rsync-backup-target/metadata.yml

6
bind/hooks/log_rotate-relation-joined

@ -6,7 +6,9 @@
set -e
named_uid=$(docker_get_uid "$SERVICE_NAME" "named")
named_uid_gid=$(docker_get_uid_gid "$SERVICE_NAME" "named" "named")
uid=${named_uid_gid%%$'\n'*}
gid=${named_uid_gid#*$'\n'}
LOGS=/var/log/bind
mkdir -p "$SERVICE_DATASTORE/$LOGS"
@ -30,7 +32,7 @@ file_put "$DST" <<EOF
compress
delaycompress
notifempty
create 640
create 0640 :$uid :$gid
sharedscripts
postrotate
dc exec $SERVICE_NAME /usr/sbin/rndc reconfig > /dev/null 2>/dev/null || true

17
logrotate/build/Dockerfile

@ -1,4 +1,16 @@
FROM docker.0k.io/alpine
FROM docker.0k.io/alpine:3.20 as builder
## XXXvlab: see https://github.com/logrotate/logrotate/issues/650
## for why we need to build our own logrotate
RUN apk add --no-cache build-base git autoconf automake popt-dev libtool make xz
RUN cd /tmp && git clone https://github.com/logrotate/logrotate.git && \
cd /tmp/logrotate && git checkout d57dff56edce193abf7a660da3635be89e57fc8e && \
./autogen.sh && ./configure && make && make install
FROM docker.0k.io/alpine:3.20
## dependency of docker-send-signal
RUN apk add curl
@ -7,6 +19,9 @@ RUN apk add jq
RUN apk add logrotate && \
sed -ri 's/^(\/var\/log\/messages \{\})$/# \1/g' /etc/logrotate.conf
## Replace logrotate 3.21 from alpine with our 3.22+ version
COPY --from=builder /usr/local/sbin/logrotate /usr/sbin/logrotate
COPY src/ /
ENTRYPOINT ["/entrypoint.sh"]

7
logrotate/build/src/entrypoint.sh

@ -1,13 +1,12 @@
#!/bin/sh
# Clean non existent log file entries from status file
cd /var/lib/logrotate
cd /var/lib/logrotate || return 1
test -e status || touch status
head -1 status > status.clean
sed 's/"//g' status | while read logfile date
do
sed 's/"//g' status | while read -r logfile date; do
[ -e "$logfile" ] && echo "\"$logfile\" $date"
done >> status.clean
mv status.clean status
/usr/sbin/logrotate -v -s /var/lib/logrotate/status /etc/logrotate.conf
exec /usr/sbin/logrotate -v -s /var/lib/logrotate/status /etc/logrotate.conf

5
odoo-tecnativa/hooks/init

@ -17,11 +17,12 @@ LIB="$SERVICE_DATASTORE/var/lib/odoo"
set -e
odoo_uid=$(get_odoo_uid)
odoo_uid_gid=$(docker_get_uid_gid "$SERVICE_NAME" "odoo" "odoo")
odoo_uid=${odoo_uid_gid%%$'\n'*}
mkdir -p "$LIB"
## XXXvlab: this one can fail if files are removed (from sessions dir)
find "$LIB" \! -user "$odoo_uid" -print0 | while read-0 f; do
find "$LIB" \! -uid "$odoo_uid" -print0 | while read-0 f; do
chown -v "$odoo_uid" "$f" || exit 1
done

6
odoo-tecnativa/hooks/log_rotate-relation-joined

@ -6,7 +6,9 @@
set -e
odoo_uid=$(get_odoo_uid)
uid_gid=$(docker_get_uid_gid "$MASTER_BASE_SERVICE_NAME" "odoo" "odoo")
uid=${uid_gid%%$'\n'*}
gid=${uid_gid#*$'\n'}
LOGS=/var/log/odoo
mkdir -p "$SERVICE_DATASTORE/$LOGS"
@ -33,7 +35,7 @@ file_put "$DST" <<EOF
compress
delaycompress
notifempty
create 640
create 0640 :$uid :$gid
sharedscripts
}
EOF

4
odoo-tecnativa/hooks/postgres_database-relation-joined

@ -52,7 +52,9 @@ db_user = $USER
db_password = $PASSWORD
EOF
odoo_uid=$(get_odoo_uid)
odoo_uid_gid=$(docker_get_uid_gid "$MASTER_BASE_SERVICE_NAME" "odoo" "odoo")
odoo_uid=${odoo_uid_gid%%$'\n'*}
chown "$odoo_uid" "$CONFIG" && chmod 600 "$CONFIG"

10
odoo-tecnativa/lib/common

@ -1,15 +1,5 @@
# -*- mode: shell-script -*-
get_odoo_uid() {
uid_label="odoo"
odoo_uid=$(cached_cmd_on_base_image "$SERVICE_NAME" "id -u \"$uid_label\"") || {
debug "Failed to query for '$uid_label' uid in ${DARKYELLOW}$SERVICE_NAME${NORMAL} base image."
return 1
}
info "openerp uid from ${DARKYELLOW}$SERVICE_NAME${NORMAL} is '$odoo_uid'"
echo "$odoo_uid"
}
sql() {
local dbname="$1"
(

17
rsync-backup-target/build/Dockerfile

@ -1,11 +1,12 @@
FROM alpine:3.9
FROM alpine:3.20
MAINTAINER Valentin Lab <valentin.lab@kalysto.org>
## coreutils is for ``date`` support of ``--rfc-3339=seconds`` argument.
## findutils is for ``find`` support of ``--newermt`` argument.
## gawk is for ``awk`` support of unicode strings.
RUN apk add rsync sudo bash openssh-server coreutils findutils gawk
## btrfs-progs is for ``btrfs`` support for snapshotting capacity
RUN apk add rsync sudo bash openssh-server coreutils findutils gawk btrfs-progs
RUN ssh-keygen -A
## New user/group rsync/rsync with home dir in /var/lib/rsync
@ -27,6 +28,18 @@ RUN chmod 440 /etc/sudoers.d/*
RUN mkdir /var/run/sshd
ENV SCRIPT_LOGCHUNK_SHA="0.1.0"
RUN apk add curl; export pkg ; \
for pkg in logchunk; do \
echo "Getting $pkg..." ; \
bash -c -- 'varname=${pkg^^} ; varname=${varname//-/_} ; \
eval curl https://docker.0k.io/downloads/$pkg-\${SCRIPT_${varname^^}_SHA}' > \
/usr/local/bin/"$pkg" || exit 1 ; \
chmod +x /usr/local/bin/"$pkg" ; \
done
COPY ./entrypoint.sh /entrypoint.sh
EXPOSE 22

108
rsync-backup-target/build/src/usr/local/sbin/import-log-chunks

@ -0,0 +1,108 @@
#!/bin/bash
## Import rsync per-target log chunks into the logchunk database.
##
## Iterates over "target_*_rsync.log" files found in $RSYNC_LOG_PATH and
## feeds them, chunk by chunk, through the external `logchunk` tool
## (`logchunk next` -> `logchunk import`).  To avoid racing with rsync
## processes still appending to the live logs, it works on a read-only
## btrfs snapshot of $RSYNC_LOG_PATH, removed on exit.
##
## Environment (all overridable by the caller):
##   RSYNC_LOG_PATH            directory of the rsync logs; must be a
##                             btrfs subvolume (checked below)
##   RSYNC_DB_FILE             database file given to `logchunk import`
##   RSYNC_FAILED_CHUNKS_PATH  directory given to `logchunk import`,
##                             presumably where rejected chunks land —
##                             semantics owned by `logchunk` (opaque here)
RSYNC_LOG_PATH="${RSYNC_LOG_PATH:-/var/log/rsync}"
RSYNC_DB_FILE="${RSYNC_DB_FILE:-$RSYNC_LOG_PATH/logchunks.db}"
RSYNC_FAILED_CHUNKS_PATH="${RSYNC_FAILED_CHUNKS_PATH:-$RSYNC_LOG_PATH/failed_chunks}"
## Return 0 iff "$1" is the root of a btrfs subvolume: the directory must
## live on a btrfs filesystem and carry inode 2 or 256 (btrfs gives the
## root inode of a subvolume one of these two well-known numbers).
is_btrfs_subvolume() {
local dir=$1
[ "$(stat -f --format="%T" "$dir")" == "btrfs" ] || return 1
## NOTE(review): 'inode' is not declared local — it leaks into the
## caller's scope.  Harmless in this standalone script, but worth fixing.
inode="$(stat --format="%i" "$dir")"
case "$inode" in
2|256)
return 0;;
*)
return 1;;
esac
}
## Epoch time with millisecond precision (GNU date %N).
time_now() { date +%s.%3N; }
## Difference "$2 - $1" with 3 decimal places.
## NOTE(review): relies on 'bc', which is NOT verified by the command
## check below — confirm bc is installed in the image.
time_elapsed() { echo "scale=3; $2 - $1" | bc; }
## --- sanity checks -------------------------------------------------------
if ! [ -d "$RSYNC_LOG_PATH" ]; then
echo "Error: RSYNC_LOG_PATH is not a directory: $RSYNC_LOG_PATH" >&2
exit 1
fi
## Snapshotting below requires the log dir to be its own subvolume.
if ! is_btrfs_subvolume "$RSYNC_LOG_PATH"; then
echo "Error: RSYNC_LOG_PATH is not a Btrfs subvolume: $RSYNC_LOG_PATH" >&2
exit 1
fi
for cmd in btrfs logchunk; do
if ! type -p "$cmd" >/dev/null; then
echo "Error: $cmd command not found" >&2
exit 1
fi
done
if ! [ -d "$RSYNC_FAILED_CHUNKS_PATH" ]; then
mkdir -p "$RSYNC_FAILED_CHUNKS_PATH" || {
echo "Error: Failed to create RSYNC_FAILED_CHUNKS_PATH directory: $RSYNC_FAILED_CHUNKS_PATH" >&2
exit 1
}
fi
## --- snapshot the logs ---------------------------------------------------
## A leftover work dir means a previous run is active or crashed without
## cleanup: refuse to proceed rather than process a stale snapshot.
rsync_log_work_dir="${RSYNC_LOG_PATH}.logchunk"
if [ -e "$rsync_log_work_dir" ]; then
echo "Error: RSYNC_LOG_PATH work directory already exists: $rsync_log_work_dir" >&2
exit 1
fi
btrfs subvolume snapshot -r "$RSYNC_LOG_PATH" "$rsync_log_work_dir" || {
echo "Error: Failed to create snapshot of RSYNC_LOG_PATH" >&2
exit 1
}
## Double quotes on purpose: expand the work dir NOW so the trap survives
## any later change to the variable.  Deleting a snapshot needs SYS_ADMIN
## (granted via the compose fragment in hooks/init).
trap "btrfs subvolume delete '$rsync_log_work_dir'" EXIT
start=$(time_now)
for log_file in "$rsync_log_work_dir"/target_*_rsync.log; do
## Derive the target identifier from the file name:
## "target_<ident>_rsync.log" -> "<ident>".
ident="${log_file##*/}"
ident="${ident#target_}"
ident="${ident%_rsync.log}"
errors=0
chunks=0
start_ident=$(time_now)
## Integer seconds only, used for the 15s progress-report interval.
start_log_line="${start_ident%.*}"
echo "$ident:"
last_chunk_count=0
last_error_count=0
while true; do
## One chunk per iteration: `logchunk next` emits it, `logchunk
## import` stores it, sed indents the combined output for display.
logchunk next -c logchunk "$log_file" |
logchunk import "${RSYNC_DB_FILE}" "$ident" "$RSYNC_FAILED_CHUNKS_PATH" 2>&1 |
sed -r "s/^/ | /"
## Must be captured immediately: any other command would overwrite
## PIPESTATUS.  [0] = `logchunk next`, [1] = `logchunk import`.
pipe_status=("${PIPESTATUS[@]}")
if [ "${pipe_status[0]}" == 1 ]; then
## no new chunks
break
fi
## 127 is the shell's "command not found" status — unrecoverable.
if [ "${pipe_status[0]}" == 127 ]; then
echo "Error: fatal !" >&2
exit 1
fi
## A failed import is counted but does not stop the run.
errlvl="${pipe_status[1]}"
if [ "$errlvl" != 0 ]; then
errors=$((errors + 1))
fi
chunks=$((chunks + 1))
now=$(time_now)
now="${now%.*}"
## Progress line at most every 15s, with a chunks/s rate since the
## previous report.
if [ $((now - start_log_line)) -gt 15 ]; then
rate=$(echo "scale=2; ($chunks - $last_chunk_count) / ($now - $start_log_line)" | bc)
echo " |~ processed $((chunks - last_chunk_count)) chunks with $((errors - last_error_count)) errors ($rate chunks/s)"
start_log_line="$now"
last_chunk_count=$chunks
last_error_count=$errors
fi
done
## Per-target summary, only when something was processed.
if [ "$chunks" != 0 ]; then
elapsed_ident="$(time_elapsed "$start_ident" "$(time_now)")" || exit 1
echo " .. processed $chunks chunks with $errors errors in ${elapsed_ident}s"
fi
done
elapsed="$(time_elapsed "$start" "$(time_now)")" || exit 1
echo "Processed all logs in ${elapsed}s"

75
rsync-backup-target/hooks/init

@ -55,6 +55,19 @@ rebuild-config() {
e "$control_users" > "$CONTROL_USERS_FILE"
}
## Return 0 iff "$1" is the root of a btrfs subvolume.
##
## Two conditions: the directory must sit on a btrfs filesystem
## (stat -f %T), and its inode must be 2 or 256 — the well-known inode
## numbers btrfs assigns to a subvolume root.
##
## Fix: 'inode' is now declared local.  This function runs inside a
## sourced hook, so the previous bare assignment leaked 'inode' into the
## hook's global scope.
is_btrfs_subvolume() {
    local dir=$1 inode
    [ "$(stat -f --format="%T" "$dir")" == "btrfs" ] || return 1
    inode="$(stat --format="%i" "$dir")"
    case "$inode" in
        2|256)
            return 0;;
        *)
            return 1;;
    esac
}
local_path_key=/etc/rsync/keys/admin
host_path_key="$SERVICE_CONFIGSTORE${local_path_key}"
@ -63,12 +76,74 @@ CONTROL_USERS_FILE="$SERVICE_DATASTORE/.control-pass"
## Was it already properly propagated to database ?
control_users=$(H "${admin_keys}" "$(declare -f "rebuild-config")")
if ! out=$(stat -f -c %T "$SERVICE_DATASTORE"/var/log 2>&1); then
err "Command 'stat' failed with error:"
echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
exit 1
fi
compose_fragments=""
if [ "$out" == "btrfs" ]; then
## We'll need to add SYS_ADMIN capability to the container to
## allow it to delete snapshots
compose_fragments+="\
cap_add:
- SYS_ADMIN
"
RSYNC_LOG_PATH="$SERVICE_DATASTORE/var/log/rsync"
if ! is_btrfs_subvolume "$RSYNC_LOG_PATH"; then
previous_contents=
if [ -e "$RSYNC_LOG_PATH" ]; then
previous_contents=1
info "Directory '$RSYNC_LOG_PATH' exists but is not a btrfs subvolume."
## we want to keep the data, so we'll move it to a temporary location
mv "$RSYNC_LOG_PATH" "${RSYNC_LOG_PATH}.bak"
fi
if ! out=$(btrfs subvolume create "$RSYNC_LOG_PATH" 2>&1); then
err "Command 'btrfs subvolume create' failed with error:"
echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
if [ -n "$previous_contents" ]; then
info "Restoring previous contents of '$RSYNC_LOG_PATH'"
mv "${RSYNC_LOG_PATH}.bak" "$RSYNC_LOG_PATH" || exit 1
fi
exit 1
fi
if [ -n "$previous_contents" ]; then
info "Moving previous contents of '$RSYNC_LOG_PATH' into the new subvolume."
(
## avoid sending our env to find,
## to prevent any "The environment is too large for exec" error
env -i PATH=/bin:/usr/bin \
find "${RSYNC_LOG_PATH}.bak" \
-mindepth 1 -maxdepth 1 \
-exec cp -a {} "$RSYNC_LOG_PATH/" \;
) && rm -rf "${RSYNC_LOG_PATH}.bak" || {
err "Failed to copy previous contents of '$RSYNC_LOG_PATH' into the new subvolume."
rmdir "$RSYNC_LOG_PATH" || {
err "Failed to delete the newly created subvolume."
echo " Couldn't restore previous state !!" >&2
exit 1
}
mv "${RSYNC_LOG_PATH}.bak" "$RSYNC_LOG_PATH" || {
err "Failed to restore previous contents of '$RSYNC_LOG_PATH'."
echo " Couldn't restore previous state !!" >&2
exit 1
}
exit 1
}
fi
fi
fi
init-config-add "\
$SERVICE_NAME:
volumes:
- $host_path_key:$local_path_key
labels:
- compose.config_hash=$control_users
$compose_fragments
"
if [ -e "$CONTROL_USERS_FILE" ] && [ "$control_users" == "$(cat "$CONTROL_USERS_FILE")" ]; then

24
rsync-backup-target/hooks/log_rotate-relation-joined

@ -2,17 +2,23 @@
## Should be executable N time in a row with same result.
. lib/common
set -e
uid=$(docker_get_uid "$SERVICE_NAME" "rsync")
uid_gid=$(docker_get_uid_gid "$SERVICE_NAME" "rsync" "rsync")
uid=${uid_gid%%$'\n'*}
gid=${uid_gid#*$'\n'}
LOGS=/var/log/rsync
mkdir -p "$SERVICE_DATASTORE/$LOGS"
touch "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
chown -v "$uid" "$SERVICE_DATASTORE/$LOGS" "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
chmod -v 0640 "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
touch "$SERVICE_DATASTORE/$LOGS/ssh-admin-cmd-validate.log"
chown -v "$uid:$gid" "$SERVICE_DATASTORE/$LOGS" "$SERVICE_DATASTORE/$LOGS/ssh-admin-cmd-validate.log"
chmod -v 0660 "$SERVICE_DATASTORE/$LOGS/ssh-admin-cmd-validate.log"
rotated_count=$(relation-get rotated-count 2>/dev/null) || true
rotated_count=${rotated_count:-52}
@ -33,7 +39,9 @@ file_put "$DST" <<EOF
compress
delaycompress
notifempty
create 640 $uid
## XXXvlab: see https://github.com/logrotate/logrotate/issues/650
## for the syntax headache (and it relies on correct version of logrotate)
create 0640 :$uid :0
sharedscripts
}
@ -49,7 +57,9 @@ file_put "$DST" <<EOF
compress
delaycompress
notifempty
create 660 $uid
## XXXvlab: see https://github.com/logrotate/logrotate/issues/650
## for the syntax headache (and it relies on correct version of logrotate)
create 0660 :$uid :$gid
sharedscripts
}
@ -65,7 +75,7 @@ file_put "$DST" <<EOF
compress
delaycompress
notifempty
create 640
create 0640 :0 :0
sharedscripts
}
EOF

8
rsync-backup-target/metadata.yml

@ -3,6 +3,7 @@ data-resources:
- /etc/rsync/keys
- /var/mirror
- /var/log/rsync
- /var/spool/logchunk
uses:
log-rotate:
@ -13,3 +14,10 @@ uses:
solves:
unmanaged-logs: "in docker logs"
#default-options:
schedule-command:
constraint: required
auto: summon
solves:
missing-feature: "import log chunk in database"
default-options: !var-expand
(0 * * * *) {-p 10 -D} dc exec -T "$MASTER_BASE_SERVICE_NAME" import-log-chunks
Loading…
Cancel
Save