|
|
@@ -436,11 +436,11 @@ compose:install-backup() {
     ping_check "$host" || return 1
 
     if [ -e "/root/.ssh/rsync_rsa" ]; then
-        warn "deleting private key in /root/.ssh/rsync_rsa, has we are not using it anymore."
+        warn "deleting private key in /root/.ssh/rsync_rsa, as we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa
     fi
     if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
-        warn "deleting public key in /root/.ssh/rsync_rsa.pub, has we are not using it anymore."
+        warn "deleting public key in /root/.ssh/rsync_rsa.pub, as we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa.pub
     fi
 
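Editor's note: the two guarded removals above share one pattern; if more
legacy key files ever need the same treatment, the idiom factors into a
loop. A minimal sketch, reusing only the paths and commands already in
this hunk:

    for f in /root/.ssh/rsync_rsa /root/.ssh/rsync_rsa.pub; do
        if [ -e "$f" ]; then
            warn "deleting legacy key $f, as we are not using it anymore."
            rm -fv "$f"
        fi
    done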
|
|
|
|
|
|
@@ -887,8 +887,44 @@ export -f cyclos:unlock
 
 rocketchat:drop-indexes() {
     local project_name="$1" dbname="$2"
 
-    echo "db.users.dropIndexes()" |
-        compose:mongo "${project_name}" "${dbname}"
+    compose:mongo "${project_name}" "${dbname}" <<'EOF'
+db.users.dropIndexes();
+
+// Check if the 'rocketchat_uploads' collection exists
+var collections = db.getCollectionNames();
+if (collections.indexOf('rocketchat_uploads') !== -1) {
+    db.rocketchat_uploads.dropIndexes();
+}
+if (collections.indexOf('rocketchat_read_receipts') !== -1) {
+    db.rocketchat_read_receipts.dropIndexes();
+    var duplicates = [];
+
+    db.getCollection("rocketchat_read_receipts").aggregate([
+        {
+            "$group": {
+                "_id": { "roomId": "$roomId", "userId": "$userId", "messageId": "$messageId" },
+                "uniqueIds": { "$addToSet": "$_id" },
+                "count": { "$sum": 1 }
+            }
+        },
+        { "$match": { "count": { "$gt": 1 } } }
+    ],
+    { allowDiskUse: true }
+    ).forEach(function (doc) {
+        // remove 1st element
+        doc.uniqueIds.shift();
+        doc.uniqueIds.forEach(function (dupId) {
+            duplicates.push(dupId);
+        });
+    });
+
+    // printjson(duplicates);
+
+    db.getCollection("rocketchat_read_receipts").remove({ _id: { $in: duplicates } });
+}
+EOF
 }
 export -f rocketchat:drop-indexes
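Editor's note: the heredoc replaces the one-liner pipe so the whole mongo
script lives in a single readable block. The aggregation groups read
receipts by (roomId, userId, messageId); for every group occurring more
than once, the first _id is kept (shift) and the remaining _ids are
collected and removed, presumably so Rocket.Chat can recreate a unique
index on those fields without tripping on duplicates. A hypothetical
invocation (project and database names are examples, not taken from the
patch):

    rocketchat:drop-indexes "myproject" "rocketchat"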
|
|
|
|
|
|
@ -916,9 +952,25 @@ compose:get_cron_docker_cmd() { |
|
|
|
local cron_line cmd_line docker_cmd |
|
|
|
project_name=$(compose:project_name) || return 1 |
|
|
|
|
|
|
|
container=$(compose:service:containers "${project_name}" "cron") || { |
|
|
|
err "Can't find service 'cron' in project ${project_name}." |
|
|
|
return 1 |
|
|
|
} |
|
|
|
if docker exec "$container" test -e /etc/cron.d/rsync-backup; then |
|
|
|
if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then |
|
|
|
err "Can't find cron_line in cron container." |
|
|
|
echo " Have you forgotten to run 'compose up' ?" >&2 |
|
|
|
return 1 |
|
|
|
fi |
|
|
|
elif docker exec "$container" test -e /etc/crontabs/root; then |
|
|
|
if ! cron_line=$(docker exec "$container" cat /etc/crontabs/root | grep " launch-rsync-backup " | grep "\* \* \*"); then |
|
|
|
err "Can't find cron_line in cron container." |
|
|
|
return 1 |
|
|
|
fi |
|
|
|
else |
|
|
|
err "Unrecognized cron container:" |
|
|
|
echo " Can't find neither:" >&2 |
|
|
|
echo " - /etc/cron.d/rsync-backup for old-style cron services" >&2 |
|
|
|
echo " - nor /etc/crontabs/root for new-style cron services." >&2 |
|
|
|
return 1 |
|
|
|
fi |
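Editor's note: the new branch structure probes for the two cron layouts
the images may ship: Debian-style cron reads drop-in files under
/etc/cron.d/, while BusyBox crond reads /etc/crontabs/root. Stripped of
the line parsing, the probe alone looks like this (a sketch assuming
"$container" holds the name of a running container):

    for f in /etc/cron.d/rsync-backup /etc/crontabs/root; do
        docker exec "$container" test -e "$f" && { echo "found: $f"; break; }
    done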
|
|
|
|
|
|
@@ -1077,13 +1129,13 @@ docker restart "$container_id"
 sleep 2
 docker restart "$container_id"
 EOF
-        return $errlvl
+        return 2
     fi
     warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
     echo "  ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
     echo "$out" | prefix "  ${DARKGRAY}|${NORMAL} " >&2
     echo "  ${DARKGRAY}..${NORMAL} leaving this as-is."
-    return $errlvl
+    return 1
 }
 
 docker:api() {
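Editor's note: swapping the stale $errlvl for explicit codes gives this
health-check fixer a three-way contract callers can branch on: 0 =
healthy, 2 = fixed by a restart, 1 = left broken. Judging from the final
hunk below, the function is container:health:check-fix:no-matching-entries;
a distilled consumer would look like:

    container:health:check-fix:no-matching-entries "$container_id"
    case "$?" in
        0) debug "healthy" ;;
        2) info "was fixed by a restart" ;;
        *) err "still broken"; exit 1 ;;
    esac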
|
|
@@ -1650,7 +1702,18 @@ cmdline.spec:odoo:cmd:restore:run() {
     opts_load=()
     [ "$opt_neutralize" ] && opts_load+=("--neutralize")
 
-    #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
+    project_name=$(compose:project_name) || exit 1
+    container:health:check-fix:no-matching-entries "${project_name}_${odoo_service}_1"
+    case "$?" in
+        0)
+            debug "Container ${project_name}_${odoo_service}_1 is healthy."
+            ;;
+        1)  err "Container ${project_name}_${odoo_service}_1 is not healthy."
+            exit 1
+            ;;
+        2)  info "Container ${project_name}_${odoo_service}_1 was fixed."
+            ;;
+    esac
 
     msg_dbname=default
     [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
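Editor's note: opts_load shows the usual bash idiom for accumulating
options conditionally: start from an empty array, append flags behind
guards, and expand with "${opts_load[@]}" at the call site. The consumer
sits outside this excerpt, so the last line below is hypothetical:

    opts_load=()
    [ "$opt_neutralize" ] && opts_load+=("--neutralize")
    odoo_restore_cmd "${opts_load[@]}"   # hypothetical consumer, not in the patch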
|
|
|