13 Commits

Changed files:
  1. README.org (4)
  2. bin/0km (129)
  3. bin/myc-update (53)
  4. bin/vps (152)
  5. etc/sysctl.d/90-inotify_watches (1)

README.org (4)

@@ -1193,14 +1193,14 @@ compose --debug up
If this command does not work, take the time to read the error message
carefully.
**** Clear the caches in ~/var/cache/compose~
**** Clear the ~compose~ caches
In case of unexplained, previously unseen problems, it is worth checking
whether clearing compose's caches fixes the problem:
#+begin_src sh
rm /var/cache/compose/*
compose --debug cache clear
#+end_src
Then re-run the command that was failing (for example ~compose
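A minimal session illustrating the above could look like this (~compose --debug
up~ is just the example command taken from the surrounding text):
#+begin_src sh
compose --debug cache clear   # drop compose's caches
compose --debug up            # re-run the command that was failing
#+end_src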

bin/0km (129)

@@ -259,7 +259,7 @@ vps_check() {
fi </dev/null
compose_content=$(ssh:run "root@$vps" -- cat /opt/apps/myc-deploy/compose.yml </dev/null) ||
{ echo "${DARKRED}no-compose${NORMAL}"; return 1; }
echo "$compose_content" | grep backup >/dev/null 2>&1 ||
echo "$compose_content" | yq -e ".rsync-backup" >/dev/null 2>&1 ||
{ echo "${DARKRED}no-backup${NORMAL}"; return 1; }
}
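## Note (sketch): `yq -e ".rsync-backup"` sets a non-zero exit status when the
## key is absent or null (yq's -e/--exit-status flag), so the check now requires
## an actual top-level `rsync-backup:` entry in compose.yml instead of any line
## that merely contains the word "backup".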
@@ -656,6 +656,92 @@ EOF
}
NTFY_TOPIC_FILE="/etc/ntfy/topics.yml"
subscribe:ntfy:exists() {
local vps="$1"
if ! out=$(echo "[ -f \"$NTFY_TOPIC_FILE\" ] && echo ok || true" | \
ssh:run "root@$vps" -- bash); then
err "Unable to check for existence of '$NTFY_TOPIC_FILE'."
fi
if [ -z "$out" ]; then
err "File '$NTFY_TOPIC_FILE' not found on $vps."
return 1
fi
}
ntfy:rm() {
local channel="$1" topic="$2" vps="$3"
subscribe:ntfy:exists "$vps" || return 1
if ! out=$(echo "yq -i 'del(.[\"$channel\"][] | select(. == \"$TOPIC\"))' \"$NTFY_TOPIC_FILE\"" | \
ssh:run "root@$vps" -- bash); then
err "Failed to remove channel '$channel' from '$NTFY_TOPIC_FILE'."
return 1
fi
info "Channel '$channel' removed from '$NTFY_TOPIC_FILE' on $vps."
ssh:run "root@$vps" -- cat "$NTFY_TOPIC_FILE"
}
ntfy:add() {
local channel="$1" topic="$2" vps="$3"
subscribe:ntfy:exists "$vps" || return 1
if ! out=$(echo "yq '. | has(\"$channel\")' \"$NTFY_TOPIC_FILE\"" | \
ssh:run "root@$vps" -- bash); then
err "Failed to check if channel '$channel' with topic '$topic' is already in '$NTFY_TOPIC_FILE'."
return 1
fi
if [ "$out" != "true" ]; then
## Channel does not exist
if ! out=$(echo "yq -i '.[\"$channel\"] = []' \"$NTFY_TOPIC_FILE\"" | \
ssh:run "root@$vps" -- bash); then
err "Failed to create a new channel '$channel' entry in '$NTFY_TOPIC_FILE'."
return 1
fi
else
## Channel exists
if ! out=$(echo "yq '.[\"$channel\"] | any_c(. == \"$topic\")' \"$NTFY_TOPIC_FILE\"" | \
ssh:run "root@$vps" -- bash); then
err "Failed to check if channel '$channel' with topic '$topic' is already in '$NTFY_TOPIC_FILE'."
return 1
fi
if [ "$out" == "true" ]; then
info "Channel '$channel' with topic '$topic' already exists in '$NTFY_TOPIC_FILE'."
return 0
fi
fi
if ! out=$(echo "yq -i '.[\"$channel\"] += [\"$topic\"]' \"$NTFY_TOPIC_FILE\"" | \
ssh:run "root@$vps" -- bash); then
err "Failed to add channel '$channel' with topic '$topic' to '$NTFY_TOPIC_FILE'."
return 1
fi
info "Channel '$channel' added with topic '$topic' to '$NTFY_TOPIC_FILE' on $vps."
}
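## Sketch of what the yq calls above do, assuming /etc/ntfy/topics.yml maps each
## channel to a list of topics (the channel/topic names below are hypothetical):
##   # topics.yml before:  mails: ["main"]
##   yq -i '.["mails"] += ["alerts"]' /etc/ntfy/topics.yml                   # add -> mails: ["main", "alerts"]
##   yq -i 'del(.["mails"][] | select(. == "alerts"))' /etc/ntfy/topics.yml  # rm  -> mails: ["main"]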
NTFY_SERVER="https://ntfy.0k.io"
subscribe:add() {
local vps="$1"
read-0 channel topic || {
err "Couldn't read CHANNEL and TOPIC arguments."
return 1
}
vps_connection_check "$vps" </dev/null || return 1
ntfy:add "$channel" "$topic" "$vps"
}
subscribe:rm() {
local vps="$1"
read-0 channel topic || {
err "Couldn't read CHANNEL and TOPIC arguments."
return 1
}
vps_connection_check "$vps" </dev/null || return 1
ntfy:rm "$channel" "$topic" "$vps"
}
vps_backup_recover() {
local vps="$1" admin server id path rtype force type
@@ -1594,4 +1680,45 @@ graph:def:load_avg() {
}
cmdline.spec.gnu vps-subscribe
cmdline.spec::cmd:vps-subscribe:run() {
:
}
cmdline.spec.gnu add
cmdline.spec:vps-subscribe:cmd:add:run() {
: :posarg: CHANNEL 'Channel whose messages will be sent to the given topic'
: :posarg: TOPIC 'Ntfy topic to receive messages of the given channel
(format: "[MYSERVER:]MYTOPICS"
Examples: "ntfy.0k.io:main,storage,alerts",
"main{1,3,7}"
)'
: :posarg: [VPS...] 'Target host(s) on which to add the subscription'
printf "%s\0" "$CHANNEL" "$TOPIC" |
vps_mux subscribe:add "${VPS[@]}"
}
cmdline.spec.gnu rm
cmdline.spec:vps-subscribe:cmd:rm:run() {
: :posarg: CHANNEL 'Channel whose messages will be sent to the given topic'
: :posarg: TOPIC 'Ntfy topic to receive messages of the given channel
(format: "[MYSERVER:]MYTOPICS"
Examples: "ntfy.0k.io:main,storage,alerts",
"main{1,3,7}"
)'
: :posarg: [VPS...] 'Target host(s) from which to remove the subscription'
printf "%s\0" "$CHANNEL" "$TOPIC" |
vps_mux subscribe:rm "${VPS[@]}"
}
cmdline::parse "$@"
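Inferred usage of the new sub-commands (a sketch based on the cmdline.spec
declarations above; channel, topic and host names are placeholders):
#+begin_src sh
0km vps-subscribe add mails ntfy.0k.io:main,alerts myhost.example.com
0km vps-subscribe rm mails ntfy.0k.io:main,alerts myhost.example.com
#+end_src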

bin/myc-update (53)

@@ -7,6 +7,37 @@ include common
include pretty
MIN_DISK_SPACE="${MIN_DISK_SPACE:-300M}"
## Check remaining disk space
## (convert the human-readable $MIN_DISK_SPACE to KiB using numfmt)
if [ -n "$MIN_DISK_SPACE" ]; then
min_disk_space_kbytes=$(numfmt --from=iec --to-unit=1024 "$MIN_DISK_SPACE") || {
err "Invalid format for '\$MIN_DISK_SPACE'."
exit 1
}
if ! remaining_kbytes=$(df / | awk 'NR==2 {print $4}'); then
err "Failed to get remaining disk space."
exit 1
fi
if [ "$remaining_kbytes" -lt "$min_disk_space_kbytes" ]; then
err "Not enough disk space."
human_min_disk_space=$(numfmt --to=iec --format="%.2f" --from-unit=1024 "$min_disk_space_kbytes") || {
err "Failed to convert '\$MIN_DISK_SPACE' to human readable format."
exit 1
}
human_remaining_kbytes=$(numfmt --to=iec --format="%.2f" --from-unit=1024 "$remaining_kbytes") || {
err "Failed to convert '\$remaining_kbytes' to human readable format."
exit 1
}
echo " - At least $human_min_dist_space are required." >&2
echo " - Only $human_remaining_kbytes are available." >&2
exit 1
fi
fi
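## Sketch of the conversion above (values are illustrative):
##   numfmt --from=iec --to-unit=1024 300M   # -> 307200 (KiB)
##   df / | awk 'NR==2 {print $4}'           # -> available KiB on /, e.g. 1048576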
start=$SECONDS
if [ -z "$NO_UPDATE" -a -d "/opt/apps/myc-manage" ]; then
@@ -61,10 +92,28 @@ docker pull docker.0k.io/letsencrypt
EOF
Wrap -d "Updating cron scripts" <<EOF || exit 1
ln -sfn /opt/apps/myc-manage/etc/cron.d/* /etc/cron.d/
find -L /etc/cron.d -maxdepth 1 -type l -ilname /opt/apps/myc-manage/etc/cron.d/\* -delete
for d in /etc/cron.{d,daily,hourly,monthly,weekly}; do
ln -sfn "/opt/apps/myc-manage\$d/"* "\$d/" &&
find -L "\$d" -maxdepth 1 -type l -ilname "/opt/apps/myc-manage\$d/"\* -delete
done
EOF
Wrap -d "Updating sysctl scripts" <<EOF || exit 1
for d in /etc/sysctl.d; do
ln -sfn "/opt/apps/myc-manage\$d/"* "\$d/" &&
find -L "\$d" -maxdepth 1 -type l -ilname "/opt/apps/myc-manage\$d/"\* -delete
done
EOF
if [ -f "/root/.bashrc" ]; then
Wrap -d "Enable colors in bash" <<'EOF' || exit 1
sed -ri 's/^# (export LS_OPTIONS=.--color=auto.)/\1/;
s/^# (eval "`dircolors`")/\1/;
s/^# (alias ls='"'ls \\\$LS_OPTIONS'"')/\1/' /root/.bashrc
EOF
fi
for keyfile in {/root,/home/debian}/.ssh/authorized_keys; do
[ -e "$keyfile" ] || continue
sed -ri 's%^ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDri3GHzDt0Il0jv6zLjwkge48dN9tv11sqVNnKoDeUxzk4kn7Ng5ldd3p6dYL6Pa5NDqJUAhO/d/q08IWuwfEbtj8Yc/EkahcRwVD2imPceUeDgyCaOJhq7WO4c9d9yG8PnRO2\+Zk92a9L5vuELVLr4UHIQOs2/eFRY2/ODV8ebf5L1issGzfLd/IPhX5oJwMwKfqIFOP7KPQ26duHNRq4bYOD9ePW4shfxmyQDk6dSImFat05ErT\+X7703PcPx/PX2AIqqz95zqM6M26BywAohuaD5joxKgkd/mMIJylvT8GEYDlcLMHwnM7LtwtyJ1O9dkVpsibIqGy20KlAOGPf admin@0k$%ssh-ed25519 AAAAC3NzaC1lZDI1NTE5AAAAIMV3USt/BLnXnUk7rk8v42mISZaXBZuULbh2vx2Amk7k admin@old0kreplacement%g' "$keyfile"

bin/vps (152)

@@ -436,11 +436,11 @@ compose:install-backup() {
ping_check "$host" || return 1
if [ -e "/root/.ssh/rsync_rsa" ]; then
warn "deleting private key in /root/.ssh/rsync_rsa, has we are not using it anymore."
warn "deleting private key in /root/.ssh/rsync_rsa, as we are not using it anymore."
rm -fv /root/.ssh/rsync_rsa
fi
if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
warn "deleting public key in /root/.ssh/rsync_rsa.pub, has we are not using it anymore."
warn "deleting public key in /root/.ssh/rsync_rsa.pub, as we are not using it anymore."
rm -fv /root/.ssh/rsync_rsa.pub
fi
@@ -887,8 +887,44 @@ export -f cyclos:unlock
rocketchat:drop-indexes() {
local project_name="$1" dbname="$2"
echo "db.users.dropIndexes()" |
compose:mongo "${project_name}" "${dbname}"
compose:mongo "${project_name}" "${dbname}" <<'EOF'
db.users.dropIndexes();
// Check if the 'rocketchat_uploads' collection exists
var collections = db.getCollectionNames();
if (collections.indexOf('rocketchat_uploads') !== -1) {
db.rocketchat_uploads.dropIndexes();
}
if (collections.indexOf('rocketchat_read_receipts') !== -1) {
db.rocketchat_read_receipts.dropIndexes();
var duplicates = [];
db.getCollection("rocketchat_read_receipts").aggregate([
{
"$group": {
"_id": { "roomId": "$roomId", "userId": "$userId", "messageId": "$messageId" },
"uniqueIds": { "$addToSet": "$_id" },
"count": { "$sum": 1 }
}
},
{ "$match": { "count": { "$gt": 1 } } }
],
{ allowDiskUse: true }
).forEach(function (doc) {
// keep the first id, collect the remaining ones as duplicates
doc.uniqueIds.shift();
doc.uniqueIds.forEach(function (dupId) {
duplicates.push(dupId);
});
});
// printjson(duplicates);
db.getCollection("rocketchat_read_receipts").remove({ _id: { $in: duplicates } });
}
EOF
}
export -f rocketchat:drop-indexes
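## Example invocation (project and database names are placeholders):
##   rocketchat:drop-indexes "myproject" "rocketchat"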
@@ -916,9 +952,25 @@ compose:get_cron_docker_cmd() {
local cron_line cmd_line docker_cmd
project_name=$(compose:project_name) || return 1
if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
err "Can't find cron_line in cron container."
echo " Have you forgotten to run 'compose up' ?" >&2
container=$(compose:service:containers "${project_name}" "cron") || {
err "Can't find service 'cron' in project ${project_name}."
return 1
}
if docker exec "$container" test -e /etc/cron.d/rsync-backup; then
if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
err "Can't find cron_line in cron container."
return 1
fi
elif docker exec "$container" test -e /etc/crontabs/root; then
if ! cron_line=$(docker exec "$container" cat /etc/crontabs/root | grep " launch-rsync-backup " | grep "\* \* \*"); then
err "Can't find cron_line in cron container."
return 1
fi
else
err "Unrecognized cron container:"
echo " Can't find neither:" >&2
echo " - /etc/cron.d/rsync-backup for old-style cron services" >&2
echo " - nor /etc/crontabs/root for new-style cron services." >&2
return 1
fi
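## A matching entry would look roughly like the following (hypothetical; the
## greps above only require the "* * *" schedule pattern and, for
## /etc/crontabs/root, the "launch-rsync-backup" token):
##   0 3 * * * launch-rsync-backup ...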
@@ -1077,13 +1129,13 @@ docker restart "$container_id"
sleep 2
docker restart "$container_id"
EOF
return $errlvl
return 2
fi
warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
return $errlvl
return 1
}
docker:api() {
@@ -1650,7 +1702,18 @@ cmdline.spec:odoo:cmd:restore:run() {
opts_load=()
[ "$opt_neutralize" ] && opts_load+=("--neutralize")
#cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
project_name=$(compose:project_name) || exit 1
container:health:check-fix:no-matching-entries "${project_name}_${odoo_service}_1"
case "$?" in
0)
debug "Container ${project_name}_${odoo_service}_1 is healthy."
;;
1) err "Container ${project_name}_${odoo_service}_1 is not healthy."
exit 1
;;
2) info "Container ${project_name}_${odoo_service}_1 was fixed."
;;
esac
msg_dbname=default
[ -n "$opt_database" ] && msg_dbname="'$opt_database'"
@@ -2552,4 +2615,73 @@ cmdline.spec:monujo:cmd:set-version:run() {
}
cmdline.spec::cmd:check:run() {
:
}
cmdline.spec.gnu check
cmdline.spec:check:cmd:backup:run() {
: :optfla: --notify,-n "Send result through notify"
: :optval: --service,-s "The backup service name (defaults to 'rsync-backup')"
## Daily check: is a backup service configured, and when was the last backup done?
## ALERT if a backup service is configured and the last backup is older than 24h.
local STATE_FILE="/var/run/myc-manage/backup.state"
mkdir -p "${STATE_FILE%/*}"
service=${opt_service:-rsync-backup}
project_name=$(compose:project_name) || exit 1
## check if service exists in compose.yml
if ! compose:service:exists "$project_name" "$service"; then
warn "no service ${DARKYELLOW}$service${NORMAL}. Ignoring."
return 0
fi
last_backup_datetime=$(
cat /srv/datastore/data/cron/var/log/cron/*rsync-backup_script{_*,}.log | grep "total size is" | sort | tail -n 1 | cut -f -2 -d " ")
last_backup_ts=$(date -d "$last_backup_datetime" +%s)
max_ts=$(date -d "24 hours ago" +%s)
state="ok"
if [ "$last_backup_ts" -lt "$max_ts" ]; then
state="ko"
fi
if [ -z "$opt_notify" ]; then
if [ "$state" == "ok" ]; then
info "Everything is ${GREEN}ok${NORMAL}. (last backup: ${DARKCYAN}$last_backup_datetime${NORMAL})"
return 0
fi
warn "Last backup older than 1 day. (last backup: ${DARKCYAN}$last_backup_datetime${NORMAL})"
return 1
fi
## notify
last_state=$(cat "$STATE_FILE" 2>/dev/null) || true
if [ "$state" == "$last_state" ]; then
[ "$state" == "ko" ] || return 0
is_old=$(find "$STATE_FILE" -type f -mtime +2) || return 1
[ -n "$is_old" ] || return 0
fi
echo "$state" > "$STATE_FILE"
message="[$(hostname)]: WARNING no backup done in the last 24h (No backup since $days days and $hours hours)"
timestamp=$(date +%s)
time_difference=$((timestamp - last_backup_ts))
days=$((time_difference / 86400))
hours=$((time_difference % 86400 / 3600))
message="WARNING: no backup done in the last 24h (No backup since $days days and $hours hours)"
send -t "ALERT Backup" "$message"
}
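## Inferred usage (from the cmdline.spec declarations above; typically run from cron):
##   vps check backup --notify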
cmdline::parse "$@"

etc/sysctl.d/90-inotify_watches (1)

@@ -0,0 +1 @@
fs.inotify.max_user_watches = 524288
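To apply the new inotify limit without a reboot, something along these lines
should work (a sketch; `sysctl --system` re-reads every drop-in under
/etc/sysctl.d):
#+begin_src sh
sysctl --system                       # reload all sysctl drop-ins
sysctl fs.inotify.max_user_watches    # verify the active value
#+end_src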