You cannot select more than 25 topics. Topics must start with a letter or number, can include dashes ('-'), and can be up to 35 characters long.

2961 lines
92 KiB

  1. #!/bin/bash
  2. . /etc/shlib >/dev/null 2>&1 || {
  3. echo "Error: you don't have kal-shlib-core installed."
  4. echo ""
  5. echo " You might want to add `deb.kalysto.org` deb repository, you'll need root access,"
  6. echo " so you might want to run these command after a \`sudo -i\` for instance..."
  7. echo ""
  8. echo " echo deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main \\"
  9. echo " > /etc/apt/sources.list.d/kalysto.org.list"
  10. echo " wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add -"
  11. echo " apt-get update -o Dir::Etc::sourcelist=sources.list.d/kalysto.org.list \\"
  12. echo " -o Dir::Etc::sourceparts=- -o APT::Get::List-Cleanup=0"
  13. echo ""
  14. echo " Then install package kal-shlib-*:"
  15. echo ""
  16. echo " apt install kal-shlib-{common,cmdline,config,cache,docker,pretty}"
  17. echo ""
  18. exit 1
  19. } >&2
  20. include common
  21. include parse
  22. include cmdline
  23. include config
  24. include cache
  25. include fn
  26. include docker
  27. [[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
  28. version=0.1
  29. desc='Install backup'
  30. help=""
  31. version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
## Fill the named variables from stdin, the stream's last value being an
## errorlevel (counterpart of p-0a-err, which appends "$?" to its output).
## $1: name of the variable that receives the final status; remaining
## arguments: names of variables to fill, in order.
## Status is forced to 127 when input ran out before all variables were
## filled, 126 when the trailing errorlevel value is empty; in both cases
## the function itself returns false.
read-0a-err() {
    local ret="$1" eof="" idx=0 last=
    ## initialize the status variable (name held in $ret) to "0"
    read -r -- "${ret?}" <<<"0"
    shift
    while [ "$1" ]; do
        last=$idx
        read -r -- "$1" || {
            ## Put this last value in ${!ret}
            eof="$1"
            read -r -- "$ret" <<<"${!eof}"
            break
        }
        ((idx++))
        shift
    done
    [ -z "$eof" ] || {
        ## EOF was reached: the value read into the last variable was
        ## actually the trailing errorlevel, not a payload value.
        if [ "$last" != 0 ]; then
            echo "Error: read-0a-err couldn't fill all value" >&2
            read -r -- "$ret" <<<"127"
        else
            if [ -z "${!ret}" ]; then
                echo "Error: last value is not a number, did you finish with an errorlevel ?" >&2
                read -r -- "$ret" <<<"126"
            fi
        fi
        false
    }
}
  60. p-0a-err() {
  61. "$@"
  62. echo -n "$?"
  63. }
## List the docker-compose project names of all running containers
## (one per line, deduplicated).  Result is session-cached via the
## ':cache:' marker consumed by decorator._mangle_fn below.
docker:running-container-projects() {
    :cache: scope=session
    docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
}
decorator._mangle_fn docker:running-container-projects
## Generate a fresh passphrase-less RSA key pair and print the *private*
## key to stdout; the key comment is "$service_name@$host".
## Runs in a subshell so settmpdir's temporary directory stays contained.
ssh:mk-private-key() {
    local host="$1" service_name="$2"
    (
        settmpdir VPS_TMPDIR
        ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
        cat "$VPS_TMPDIR/rsync_rsa"
    )
}
  77. mailcow:has-images-running() {
  78. local images
  79. images=$(docker ps --format '{{.Image}}' | sort | uniq)
  80. [[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
  81. }
  82. mailcow:has-container-project-mentionning-mailcow() {
  83. local projects
  84. projects=$(docker:running-container-projects) || return 1
  85. [[ $'\n'"$projects"$'\n' == *mailcow* ]]
  86. }
  87. mailcow:has-running-containers() {
  88. mailcow:has-images-running ||
  89. mailcow:has-container-project-mentionning-mailcow
  90. }
  91. mailcow:get-root() {
  92. :cache: scope=session
  93. local dir
  94. for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
  95. [ -d "$dir" ] || continue
  96. [ -r "$dir/mailcow.conf" ] || continue
  97. echo "$dir"
  98. return 0
  99. done
  100. return 1
  101. }
  102. decorator._mangle_fn mailcow:get-root
## Locate the global 'compose.yml' used by compose-core, by scraping the
## debug/dry-run output of the 'compose' launcher for the
## '-e HOST_COMPOSE_YML_FILE=...' assignment.  Session-cached.
compose:get-compose-yml() {
    :cache: scope=session
    local path
    path=$(DEBUG=1 DRY_RUN=1 compose 2>&1 | egrep '^\s+-e HOST_COMPOSE_YML_FILE=' | cut -f 2- -d "=" | cut -f 1 -d " ")
    ## only report a path that actually exists
    [ -e "$path" ] || return 1
    echo "$path"
}
decorator._mangle_fn compose:get-compose-yml
export -f compose:get-compose-yml
  112. compose:has-container-project-myc() {
  113. local projects
  114. projects=$(docker:running-container-projects) || return 1
  115. [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
  116. }
  117. compose:service:exists() {
  118. local project="$1" service="$2" service_cfg
  119. service_cfg=$(cat "$(compose:get-compose-yml)" |
  120. shyaml get-value -y "$service" 2>/dev/null) || return 1
  121. [ -n "$service_cfg" ]
  122. }
## Print the name of container $1, with docker's leading "/" stripped.
docker:container:name() {
    local container_id="$1"
    docker inspect --format '{{.Name}}' "$container_id" | cut -f 2 -d "/"
}
  127. compose:service:container:get() {
  128. local project="$1" service="$2" container_id
  129. container_id=$(compose:service:container_one "$project" "$service") || return 1
  130. container_name=$(docker:container:name "$container_id") || return 1
  131. echo "$container_name"
  132. }
  133. compose:file:value-change() {
  134. local key="$1" value="$2"
  135. local compose_yml
  136. if ! compose_yml=$(compose:get-compose-yml); then
  137. err "Couldn't locate your 'compose.yml' file."
  138. return 1
  139. fi
  140. yaml:file:value-change "$compose_yml" "$key" "$value" || return 1
  141. }
  142. export -f compose:file:value-change
## Change KEY ($2) to VALUE ($3) in YAML file $1 by computing a yq-based
## rewrite, extracting only the hunk that touches the requested key from a
## 'diff -u0' of old vs new, and applying that single hunk with patch.
## Refuses to apply anything when the change touches zero or several hunks.
## Runs in a subshell (cd into the file's directory); NOTE: exits the
## caller's shell with 1 on failure rather than returning.
yaml:file:value-change() {
    local file="$1" key="$2" value="$3" first=1 count=0 diff=""
    (
        cd "${file%/*}"
        ## hunks arrive NUL-separated; the first chunk is the diff header
        while read-0 hunk; do
            if [ -n "$first" ]; then
                diff+="$hunk"
                first=
                continue
            fi
            ## keep only hunks adding the leaf key we asked to change
            if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
                ((count++))
                diff+="$hunk" >&2
            else
                :
                # echo "discarding:" >&2
                # e "$hunk" | prefix " | " >&2
            fi
        done < <(
            export DEBUG=
            settmpdir YQ_TEMP
            ## apply the change on a copy, then split top-level blocks with
            ## blank lines so diff hunks align on logical YAML entries
            cp "${file}" "$YQ_TEMP/compose.yml" &&
                yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
                sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
                diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
                    sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
            printf "\0"
        )
        if [[ "$count" == 0 ]]; then
            err "No change made to '$file'."
            return 1
        fi
        if [[ "$count" != 1 ]]; then
            err "compose file change request seems dubious and was refused:"
            e "$diff" | prefix " | " >&2
            return 1
        fi
        echo Applying: >&2
        e "$diff" | prefix " | " >&2
        patch <<<"$diff"
    ) || exit 1
}
export -f yaml:file:value-change
  186. type:is-mailcow() {
  187. mailcow:get-root >/dev/null ||
  188. mailcow:has-running-containers
  189. }
  190. type:is-compose() {
  191. compose:get-compose-yml >/dev/null &&
  192. compose:has-container-project-myc
  193. }
## Determine the type of this VPS installation ("mailcow", "compose", ...)
## by running every declared "type:is-*" probe function; prints the first
## matching type (probe order follows 'declare -F' output).  Session-cached.
vps:get-type() {
    :cache: scope=session
    local fn
    for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
        "$fn" && {
            echo "${fn#type:is-}"
            return 0
        }
    done
    return 1
}
decorator._mangle_fn vps:get-type
## Print mirror-dir's configured backup source directories (YAML list
## 'default.sources' of /etc/mirror-dir/config.yml).  Session-cached.
mirror-dir:sources() {
    :cache: scope=session
    if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
        err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
        return 1
    fi
}
decorator._mangle_fn mirror-dir:sources
## Ensure path $1 is listed in mirror-dir's backup sources, appending it
## to /etc/mirror-dir/config.yml when missing.
mirror-dir:check-add() {
    local elt="$1" sources
    sources=$(mirror-dir:sources) || return 1
    if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
        info "Volume $elt already in sources"
    else
        Elt "Adding directory $elt"
        ## insert a new list entry right after the 'sources:' line
        sed -i "/sources:/a\ - \"${elt}\"" \
            /etc/mirror-dir/config.yml
        Feedback || return 1
    fi
}
  226. mirror-dir:check-add-vol() {
  227. local elt="$1"
  228. mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
  229. }
  230. ## The first colon is to prevent auto-export of function from shlib
  231. : ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
  232. export BASH_BUG_5=1 && unset -f bash_bug_5
## Run CODE ($2) through bash under a pretty 'Wrap' label ($1), forwarding
## any remaining arguments to the generated script.  Works around
## BASH_BUG_5 by materializing the code into a temp file instead of
## handing bash a process substitution.
wrap() {
    local label="$1" code="$2"
    shift 2
    export VERBOSE=1
    ## NOTE(review): 'interpreter' is not declared local, so it leaks into
    ## the caller's scope — confirm nothing relies on that before changing.
    interpreter=/bin/bash
    if [ -n "$BASH_BUG_5" ]; then
        (
            settmpdir tmpdir
            fname=${label##*/}
            e "$code" > "$tmpdir/$fname" &&
                chmod +x "$tmpdir/$fname" &&
                Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
        )
    else
        Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
    fi
}
  250. ping_check() {
  251. #global ignore_ping_check
  252. local host="$1"
  253. ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
  254. head -n 1 | cut -f 1 -d " ") || return 1
  255. my_ip=$(curl -s myip.kal.fr)
  256. if [ "$ip" != "$my_ip" ]; then
  257. if [ -n "$ignore_ping_check" ]; then
  258. warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
  259. else
  260. err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
  261. return 1
  262. fi
  263. fi
  264. }
## Install rsync-backup on a mailcow host: installs the rsync-backup and
## mariadb-dump charm hooks, registers mailcow volumes/directories with
## mirror-dir, and prints the ssh-key registration command to hand to the
## backup server admin.
## $1: BACKUP_SERVER (host[:port]/path), $2: ignore_ping_check flag.
mailcow:install-backup() {
    local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
    ## find installation
    mailcow_root=$(mailcow:get-root) || {
        err "Couldn't find a valid mailcow root directory."
        return 1
    }
    ## check ok
    DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
        err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
        return 1
    }
    ping_check "$DOMAIN" || return 1
    MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
        err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
        return 1
    }
    ## container naming scheme differs between docker-compose v1 ('_') and
    ## 'docker compose' v2 ('-')
    if docker compose >/dev/null 2>&1; then
        MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
    else
        MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
    fi
    container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
    if [ -z "$container_id" ]; then
        err "Couldn't find docker container named '$MYSQL_CONTAINER'."
        return 1
    fi
    ## environment consumed by the charm install hooks run via wrap below
    export KEY_BACKUP_ID="mailcow"
    export MYSQL_ROOT_PASSWORD
    export MYSQL_CONTAINER
    export BACKUP_SERVER
    export DOMAIN
    wrap "Install rsync-backup on host" "
cd /srv/charm-store/rsync-backup
bash ./hooks/install.d/60-install.sh
" || return 1
    wrap "Mysql dump install" "
cd /srv/charm-store/mariadb
bash ./hooks/install.d/60-backup.sh
" || return 1
    ## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
    ## NOTE(review): the quotes around "vmail{,-attachments-vol}" suppress
    ## brace expansion, so the literal string is passed — confirm whether
    ## the expanded volume names were intended instead.
    for elt in "vmail{,-attachments-vol}" crypt redis rspamd postfix; do
        mirror-dir:check-add-vol "$elt" || return 1
    done
    mirror-dir:check-add "$mailcow_root" || return 1
    mirror-dir:check-add "/var/backups/mysql" || return 1
    mirror-dir:check-add "/etc" || return 1
    ## split BACKUP_SERVER into host and optional ssh port
    dest="$BACKUP_SERVER"
    dest="${dest%/*}"
    ssh_options=()
    if [[ "$dest" == *":"* ]]; then
        port="${dest##*:}"
        dest="${dest%%:*}"
        ssh_options=(-p "$port")
    else
        port=""
        dest="${dest%%:*}"
    fi
    info "You can run this following command from an host having admin access to $dest:"
    echo " (Or send it to a backup admin of $dest)" >&2
    echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
}
## Check whether HOST ($2) is served by compose file $1: matches either a
## service whose name looks like a domain, or the 'domain' /
## 'server-aliases' values of its web-proxy / publish-dir relations.
## Returns 0 on first match, 1 when nothing matches.
compose:has_domain() {
    local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
    ## iterate over top-level service entries (NUL-separated key/value)
    while read-0 name conf ; do
        name=$(e "$name" | shyaml get-value)
        if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
            [ "$host" == "$name" ] && return 0
        fi
        ## NOTE(review): 'rel', 'label' and 'conf_relation' are not
        ## declared local — they leak into the caller's scope.
        rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
        for relation in web-proxy publish-dir; do
            relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
            while read-0 label conf_relation; do
                domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
                    [ "$host" == "$domain" ] && return 0
                }
                server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
                    [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
                }
            done < <(e "$relation_value" | shyaml -y key-values-0)
        done
    done < <(shyaml -y key-values-0 < "$compose_file")
    return 1
}
## Install rsync-backup on a compose host: validates domain and IP, cleans
## legacy key files, ensures a '$service_name' entry (with a generated
## private key) exists in the compose file, and prints the ssh-key
## registration command for the backup server admin.
## $1: BACKUP_SERVER, $2: service name, $3: compose file path,
## $4: ignore_ping_check flag, $5: ignore_domain_check flag.
compose:install-backup() {
    local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
    ## XXXvlab: far from perfect as it mimics and depends internal
    ## logic of current default way to get a domain in compose-core
    host=$(hostname)
    if ! compose:has_domain "$compose_file" "$host"; then
        if [ -n "$ignore_domain_check" ]; then
            warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
        else
            err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
            return 1
        fi
    fi
    ping_check "$host" || return 1
    ## keys now live in the compose file; drop the legacy on-disk pair
    if [ -e "/root/.ssh/rsync_rsa" ]; then
        warn "deleting private key in /root/.ssh/rsync_rsa, as we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa
    fi
    if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
        warn "deleting public key in /root/.ssh/rsync_rsa.pub, as we are not using it anymore."
        rm -fv /root/.ssh/rsync_rsa.pub
    fi
    if service_cfg=$(cat "$compose_file" |
                         shyaml get-value -y "$service_name" 2>/dev/null); then
        ## entry already present: reuse it but refuse a target mismatch
        info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
             "is already present in '$compose_file'."
        cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
            err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
                "entry in '$compose_file'."
            return 1
        }
        private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
        target=$(e "$cfg" | shyaml get-value target) || return 1
        if [ "$target" != "$BACKUP_SERVER" ]; then
            err "Existing backup target '$target' is different" \
                "from specified '$BACKUP_SERVER'"
            return 1
        fi
    else
        private_key=$(ssh:mk-private-key "$host" "$service_name")
        ## NOTE(review): the YAML indentation below was reconstructed — the
        ## reviewed copy had lost all leading whitespace; verify against VCS.
        cat <<EOF >> "$compose_file"
$service_name:
  options:
    ident: $host
    target: $BACKUP_SERVER
    private-key: |
$(e "$private_key" | sed -r 's/^/      /g')
EOF
    fi
    ## split BACKUP_SERVER into host and optional ssh port
    dest="$BACKUP_SERVER"
    dest="${dest%/*}"
    ssh_options=()
    if [[ "$dest" == *":"* ]]; then
        port="${dest##*:}"
        dest="${dest%%:*}"
        ssh_options=(-p "$port")
    else
        port=""
        dest="${dest%%:*}"
    fi
    info "You can run this following command from an host having admin access to $dest:"
    echo " (Or send it to a backup admin of $dest)" >&2
    ## We remove ending label (label will be added or not in the
    ## private key, and thus here, depending on the version of
    ## openssh-client)
    public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
    echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
}
  417. backup-action() {
  418. local action="$1"
  419. shift
  420. vps_type=$(vps:get-type) || {
  421. err "Failed to get type of installation."
  422. return 1
  423. }
  424. if ! fn.exists "${vps_type}:${action}"; then
  425. err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
  426. return 1
  427. fi
  428. "${vps_type}:${action}" "$@"
  429. }
  430. compose:get_default_backup_host_ident() {
  431. local service_name="$1" ## Optional
  432. local compose_file service_cfg cfg target
  433. compose_file=$(compose:get-compose-yml)
  434. service_name="${service_name:-rsync-backup}"
  435. if ! service_cfg=$(cat "$compose_file" |
  436. shyaml get-value -y "$service_name" 2>/dev/null); then
  437. err "No service named '$service_name' found in 'compose.yml'."
  438. return 1
  439. fi
  440. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  441. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  442. "entry in '$compose_file'."
  443. return 1
  444. }
  445. if ! target=$(e "$cfg" | shyaml get-value target); then
  446. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  447. "entry in '$compose_file'."
  448. fi
  449. if ! target=$(e "$cfg" | shyaml get-value target); then
  450. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  451. "entry in '$compose_file'."
  452. fi
  453. if ! ident=$(e "$cfg" | shyaml get-value ident); then
  454. err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  455. "entry in '$compose_file'."
  456. fi
  457. echo "$target $ident"
  458. }
  459. mailcow:get_default_backup_host_ident() {
  460. local content cron_line ident found dest cmd_line
  461. if ! [ -e "/etc/cron.d/mirror-dir" ]; then
  462. err "No '/etc/cron.d/mirror-dir' found."
  463. return 1
  464. fi
  465. content=$(cat /etc/cron.d/mirror-dir) || {
  466. err "Can't read '/etc/cron.d/mirror-dir'."
  467. return 1
  468. }
  469. if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
  470. err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
  471. return 1
  472. fi
  473. cron_line=${cron_line%|*}
  474. cmd_line=(${cron_line#*root})
  475. found=
  476. dest=
  477. for arg in "${cmd_line[@]}"; do
  478. [ -n "$found" ] && {
  479. dest="$arg"
  480. break
  481. }
  482. [ "$arg" == "-d" ] && {
  483. found=1
  484. }
  485. done
  486. if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
  487. err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  488. return 1
  489. fi
  490. if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
  491. ## unquoting, the eval should be safe because of previous check
  492. dest=$(eval e "$dest")
  493. fi
  494. if [ -z "$dest" ]; then
  495. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  496. return 1
  497. fi
  498. ## looking for ident
  499. found=
  500. ident=
  501. for arg in "${cmd_line[@]}"; do
  502. [ -n "$found" ] && {
  503. ident="$arg"
  504. break
  505. }
  506. [ "$arg" == "-h" ] && {
  507. found=1
  508. }
  509. done
  510. if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
  511. err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  512. return 1
  513. fi
  514. if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
  515. ## unquoting, the eval should be safe because of previous check
  516. ident=$(eval e "$ident")
  517. fi
  518. if [ -z "$ident" ]; then
  519. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  520. return 1
  521. fi
  522. echo "$dest $ident"
  523. }
  524. compose:service:containers() {
  525. local project="$1" service="$2"
  526. docker ps \
  527. --filter label="com.docker.compose.project=$project" \
  528. --filter label="compose.master-service=$service" \
  529. --format="{{.ID}}"
  530. }
  531. export -f compose:service:containers
## Print the id of the single running container of PROJECT/SERVICE.
## Fails (with err) when the service has zero or several containers.
compose:service:container_one() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        ## a second readable line means more than one container
        if read-0a _; then
            err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_one
## Print the id of the first running container of PROJECT/SERVICE.
## Fails when there is none; only warns when there are several.
compose:service:container_first() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        if read-0a _; then
            warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_first
## List ids of all running docker containers.  Session-cached.
docker:running_containers() {
    :cache: scope=session
    docker ps --format="{{.ID}}"
}
decorator._mangle_fn docker:running_containers
export -f docker:running_containers
  567. compose:project:containers() {
  568. local project="$1" opts
  569. opts+=(--filter label="com.docker.compose.project=$project")
  570. docker ps "${opts[@]}" \
  571. --format="{{.ID}}"
  572. }
  573. export -f compose:project:containers
  574. compose:charm:containers() {
  575. local project="$1" charm="$2"
  576. docker ps \
  577. --filter label="com.docker.compose.project=$project" \
  578. --filter label="compose.charm=$charm" \
  579. --format="{{.ID}}"
  580. }
  581. export -f compose:charm:containers
## Print the id of the single running container of CHARM in PROJECT.
## Fails (with err) when the charm has zero or several containers.
compose:charm:container_one() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
            return 1
        }
        if read-0a _; then
            err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_one
## Print the id of the first running container of CHARM in PROJECT.
## NOTE(review): unlike compose:service:container_first, this only warns
## when no container is running and then echoes an empty id with status 0
## — confirm callers expect that before adding a 'return 1'.
compose:charm:container_first() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
        }
        if read-0a _; then
            warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_first
## Print the public URL of SERVICE ($2) in PROJECT ($1): prefer the url
## recorded in the service's web-proxy relation data; otherwise fall back
## to "http://<container ip>[:<first exposed tcp port>]".
compose:get_url() {
    local project_name="$1" service="$2" data_file network ip
    ## NOTE(review): 'data_dir' (and several vars below) are not local.
    data_dir=("/var/lib/compose/relations/${project_name}/${service}-"*"/web-proxy")
    if [ "${#data_dir[@]}" -gt 1 ]; then
        err "More than one web-proxy relation." \
            "Current 'vps' algorithm is insufficient" \
            "to figure out which relation is concerned"
        return 1
    fi
    data_file="${data_dir[0]}/data"
    if [ -d "${data_file%/*}" ]; then
        (
            set -o pipefail
            ## users can't cat directly the content
            docker run --rm \
                -v "${data_file%/*}":/tmp/dummy alpine \
                cat "/tmp/dummy/${data_file##*/}" |
                shyaml get-value url
        )
    else
        ## Assume there are no frontend relation here, the url is direct IP
        container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
        network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
        IFS=":" read -r network ip <<<"$network_ip"
        ## pick the first tcp port among the exposed ports, if any
        tcp_port=
        for port in $(docker:exposed_ports "$container_id"); do
            IFS="/" read port type <<<"$port"
            [ "$type" == "tcp" ] || continue
            tcp_port="$port"
            break
        done
        echo -n "http://$ip"
        [ -n "$tcp_port" ] && echo ":$tcp_port"
    fi || {
        err "Failed querying ${service} to frontend relation to get url."
        return 1
    }
}
export -f compose:get_url
  649. compose:container:service() {
  650. local container="$1" service
  651. if ! service=$(docker:container:label "$container" "compose.service"); then
  652. err "Failed to get service name from container ${container}."
  653. return 1
  654. fi
  655. if [ -z "$service" ]; then
  656. err "No service found for container ${container}."
  657. return 1
  658. fi
  659. echo "$service"
  660. }
  661. export -f compose:container:service
  662. compose:psql() {
  663. local project_name="$1" dbname="$2" container_id
  664. shift 2
  665. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  666. docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
  667. }
  668. export -f compose:psql
  669. compose:mongo() {
  670. local project_name="$1" dbname="$2" container_id
  671. container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
  672. docker exec -i "${container_id}" mongo --quiet "$dbname"
  673. }
  674. export -f compose:mongo
## Run 'pgm' in a throw-away container attached to the network of
## PROJECT's postgres charm container; remaining args go to pgm.
## Callers may pre-set the array 'pgm_docker_run_opts' to inject extra
## 'docker run' options (see postgres:dump / postgres:restore).
compose:pgm() {
    local project_name="$1" container_network_ip container_ip container_network
    shift
    ## NOTE(review): container_id/service_name/image_id/pgpass/cmd are not
    ## declared local and leak into the caller's scope.
    container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
    service_name=$(compose:container:service "$container_id") || return 1
    image_id=$(docker:container:image "$container_id") || return 1
    container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
    IFS=":" read -r container_network container_ip <<<"$container_network_ip"
    pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
    local final_pgm_docker_run_opts+=(
        -u 0 -e prefix_pg_local_command=" "
        --network "${container_network}"
        -e PGHOST="$container_ip"
        -e PGUSER=postgres
        -v "$pgpass:/root/.pgpass"
        "${pgm_docker_run_opts[@]}"
    )
    cmd=(docker run --rm \
             "${final_pgm_docker_run_opts[@]}" \
             "${image_id}" pgm "$@"
    )
    ## log the full command line before executing it
    echo "${cmd[@]}"
    "${cmd[@]}"
}
export -f compose:pgm
## Dump database SRC ($2) of PROJECT ($1) to local file DST ($3) by having
## pgm write into a temp dir bind-mounted in its container, then moving
## the resulting dump.gz into place.
postgres:dump() {
    local project_name="$1" src="$2" dst="$3"
    (
        settmpdir PGM_TMP_LOCATION
        ## consumed by compose:pgm as extra 'docker run' options
        pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
        compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
            mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
    ) || return 1
}
export -f postgres:dump
  710. postgres:restore() {
  711. local project_name="$1" src="$2" dst="$3"
  712. full_src_path=$(readlink -e "$src") || exit 1
  713. (
  714. pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
  715. compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
  716. ) || return 1
  717. }
  718. export -f postgres:restore
  719. odoo:get_public_user_id() {
  720. local project_name="$1" dbname="$2"
  721. echo "select res_id from ir_model_data where model = 'res.users' and name = 'public_user';" |
  722. compose:psql "$project_name" "$dbname" -qAt
  723. }
  724. cyclos:set_root_url() {
  725. local project_name="$1" dbname="$2" url="$3"
  726. echo "UPDATE configurations SET root_url = '$url';" |
  727. compose:psql "$project_name" "$dbname" || {
  728. err "Failed to set cyclos url value in '$dbname' database."
  729. return 1
  730. }
  731. }
  732. export -f cyclos:set_root_url
  733. cyclos:unlock() {
  734. local project_name="$1" dbname="$2"
  735. echo "delete from database_lock;" |
  736. compose:psql "${project_name}" "${dbname}"
  737. }
  738. export -f cyclos:unlock
## Drop rocketchat mongo indexes that block re-indexing after a restore,
## and de-duplicate read receipts before the unique index is rebuilt.
## The quoted 'EOF' heredoc is passed verbatim to the mongo shell.
rocketchat:drop-indexes() {
    local project_name="$1" dbname="$2"
    compose:mongo "${project_name}" "${dbname}" <<'EOF'
db.users.dropIndexes();
// Check if the 'rocketchat_uploads' collection exists
var collections = db.getCollectionNames();
if (collections.indexOf('rocketchat_uploads') !== -1) {
db.rocketchat_uploads.dropIndexes();
}
if (collections.indexOf('rocketchat_read_receipts') !== -1) {
db.rocketchat_read_receipts.dropIndexes();
var duplicates = [];
db.getCollection("rocketchat_read_receipts").aggregate([
{
"$group": {
"_id": { "roomId": "$roomId", "userId": "$userId", "messageId": "$messageId" },
"uniqueIds": { "$addToSet": "$_id" },
"count": { "$sum": 1 }
}
},
{ "$match": { "count": { "$gt": 1 } } }
],
{ allowDiskUse: true }
).forEach(function (doc) {
// remove 1st element
doc.uniqueIds.shift();
doc.uniqueIds.forEach(function (dupId) {
duplicates.push(dupId);
}
)
})
// printjson(duplicates);
db.getCollection("rocketchat_read_receipts").remove({ _id: { $in: duplicates } });
}
EOF
}
export -f rocketchat:drop-indexes
  776. compose:project_name() {
  777. if [ -z "$PROJECT_NAME" ]; then
  778. PROJECT_NAME=$(compose --get-project-name) || {
  779. err "Couldn't get project name."
  780. return 1
  781. }
  782. if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
  783. err "Couldn't get project name, probably because 'compose.yml' wasn't found."
  784. echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
  785. echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
  786. return 1
  787. fi
  788. export PROJECT_NAME
  789. fi
  790. echo "$PROJECT_NAME"
  791. }
  792. export -f compose:project_name
## Extract the 'docker run ...' command used by the project's cron
## container for rsync-backup; supports both the legacy layout
## (/etc/cron.d/rsync-backup) and the new one (/etc/crontabs/root).
compose:get_cron_docker_cmd() {
    local cron_line cmd_line docker_cmd
    ## NOTE(review): 'project_name', 'container' and 'args' are not local.
    project_name=$(compose:project_name) || return 1
    ## NOTE(review): compose:service:containers can print several ids;
    ## this captures its whole output — assumes a single cron container.
    container=$(compose:service:containers "${project_name}" "cron") || {
        err "Can't find service 'cron' in project ${project_name}."
        return 1
    }
    if docker exec "$container" test -e /etc/cron.d/rsync-backup; then
        ## NOTE(review): legacy branch hardcodes '<project>_cron_1' instead
        ## of using "$container" — confirm this is intentional.
        if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
            err "Can't find cron line in legacy cron container."
            return 1
        fi
    elif docker exec "$container" test -e /etc/crontabs/root; then
        if ! cron_line=$(docker exec "$container" cat /etc/crontabs/root | grep " launch-rsync-backup " | grep "\* \* \*"); then
            err "Can't find cron line in cron container."
            return 1
        fi
    else
        err "Unrecognized cron container:"
        echo " Can't find neither:" >&2
        echo " - /etc/cron.d/rsync-backup for old-style cron services" >&2
        echo " - nor /etc/crontabs/root for new-style cron services." >&2
        return 1
    fi
    ## strip the trailing pipe section and redirection, keep what follows
    ## the cron 'root' user field
    cron_line=${cron_line%|*}
    cron_line=${cron_line%"2>&1"*}
    cmd_line="${cron_line#*root}"
    ## NOTE(review): eval on cron-file content — trusted input is assumed.
    eval "args=($cmd_line)"
    ## should be last argument
    docker_cmd=$(echo ${args[@]: -1})
    if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
        echo "docker command found should start with 'docker run'." >&2
        echo "Here's command:" >&2
        echo " $docker_cmd" >&2
        return 1
    fi
    e "$docker_cmd"
}
## Rsync SRC ($3) from BACKUP_HOST's mirror (ident $2) into local DST
## ($4), running rsync inside the project's backup service image (built on
## demand).  $5: service name (default 'rsync-backup').  Honors DRY_RUN.
compose:recover-target() {
    local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
    project_name=$(compose:project_name) || return 1
    docker_image="${project_name}_${service_name}"
    if ! docker_has_image "$docker_image"; then
        compose build "${service_name}" || {
            err "Couldn't find nor build image for service '$service_name'."
            return 1
        }
    fi
    dst="${dst%/}" ## remove final slash
    ## split host[:port]
    ssh_options=(-o StrictHostKeyChecking=no)
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi
    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    ## mount the destination's parent, restore into its basename
    cmd=(
        docker run --rm --entrypoint rsync \
            -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
            -v "${dst%/*}":/mnt/dest \
            "$docker_image" \
            "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
mailcow:recover-target() {
    ## Recover (rsync back) a directory from a rsync backup server
    ## straight onto the host (rsync is run directly, no docker image,
    ## contrary to compose:recover-target).
    ##
    ## $1: backup host, optionally suffixed with ":PORT"
    ## $2: ident (mirror directory name on the backup server)
    ## $3: source path relative to /var/mirror/$ident
    ## $4: local destination directory
    ##
    ## Honors $DRY_RUN: non-empty adds rsync's -n flag.
    ## WARNING: uses rsync --delete/--delete-excluded (destructive on dst).
    local backup_host="$1" ident="$2" src="$3" dst="$4"
    dst="${dst%/}"  ## remove final slash
    ssh_options=(-o StrictHostKeyChecking=no)
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi
    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    cmd=(
        rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
nextcloud:src:version() {
    ## Print the installed nextcloud version, parsed from the
    ## container's version.php as found on the host datastore.
    ## NOTE(review): relies on caller-set global $nextcloud_service —
    ## confirm all callers define it before use.
    local version
    if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
        err "Can't find version.php file to get last version installed."
        exit 1
    fi
    ## Extract the single-quoted value on the "VersionString =" line.
    version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
    if [ -z "$version" ]; then
        err "Can't figure out version from version.php content."
        exit 1
    fi
    echo "$version"
}
container:health:check-fix:container-aliveness() {
    ## Check that container $1 answers `docker inspect` within 5s. When
    ## the call times out (timeout(1) -> errlvl 124), locate the
    ## docker-containerd-shim process holding the container, kill it and
    ## restart the container.
    ## Returns: the inspect errlvl (0 when healthy, 124 after a fix
    ## attempt, 1 when the shim pid couldn't be found).
    local container_id="$1"
    timeout 5s docker inspect "$container_id" >/dev/null 2>&1
    errlvl=$?
    if [ "$errlvl" == 124 ]; then  ## 124 == timeout(1) expired
        service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
        container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
        ## Find the shim process whose command line mentions this container.
        pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim |
                  grep "/$container_id" |
                  sed -r 's/^ *//g' |
                  cut -f 1 -d " ")
        if [ -z "$pid" ]; then
            err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command. Can't find its PID neither."
            return 1
        fi
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command (pid: $pid)."
        Wrap -d "kill pid $pid and restart" <<EOF
kill "$pid"
sleep 2
docker restart "$container_id"
EOF
    fi
    return $errlvl
}
container:health:check-fix:no-matching-entries() {
    ## Detect the docker "no matching entries in passwd file" bug on
    ## container $1 (a trivial `docker exec` fails with errlvl 126) and
    ## work around it by restarting the container twice.
    ## Returns: 0 container healthy, 2 bug detected and fix attempted,
    ##          1 unknown failure (left as-is).
    local container_id="$1"
    out=$(docker exec -u root "$container_id" echo 2>&1)
    errlvl=$?
    [ "$errlvl" == 0 ] && return 0
    service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
    container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
    if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
        Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
docker restart "$container_id"
sleep 2
docker restart "$container_id"
EOF
        return 2
    fi
    warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
    echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
    echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
    echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
    return 1
}
  956. docker:api() {
  957. local endpoint="$1"
  958. curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
  959. }
  960. docker:containers:id() {
  961. docker:api /containers/json | jq -r ".[] | .Id"
  962. }
  963. docker:containers:names() {
  964. docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
  965. }
  966. docker:container:stats() {
  967. container="$1"
  968. docker:api "/containers/$container/stats?stream=false"
  969. }
docker:containers:stats() {
    :cache: scope=session
    ## Print one aggregated stats line per compose project/service for
    ## the given container names/ids:
    ##   "<project>/<service> <ts> <mem stats...> <eth0 rx/tx stats...>"
    ## Stats of several containers of one service are summed; containers
    ## not managed by compose are grouped under project "@" with their
    ## image's first repo tag as service name.
    local jobs='' line container id_names sha names name data service project
    local DC="com.docker.compose"
    local PSF_values=(
        ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
    )
    local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
    id_names=$(docker ps -a --format="$PSF") || return 1
    ## Create a docker container table from name/sha to service, project, image_name
    declare -A resolve
    while read-0a line; do
        sha=${line%% *}; line=${line#* }
        names=${line%% *}; line=${line#* }
        names=(${names//,/ })
        for name in "${names[@]}"; do
            resolve["$name"]="$line"
        done
        resolve["$sha"]="$line"
    done < <(printf "%s\n" "$id_names")
    declare -A data
    while read-0a line; do
        ## Each stats line: "<name> <ts> <v1> <v2> ..." (see jq below).
        name=${line%% *}; line=${line#* }
        ts=${line%% *}; line=${line#* }
        resolved="${resolve["$name"]}"
        project=${resolved%% *}; resolved=${resolved#* }
        service=${resolved%% *}; resolved=${resolved#* }
        image_name="$resolved"
        if [ -z "$service" ]; then
            project="@"
            service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
            service=${service//\//_}
        fi
        if [ -n "${data["$project/$service"]}" ]; then
            ## Service already seen: sum columns element-wise, keeping
            ## the latest timestamp.
            previous=(${data["$project/$service"]})
            previous=(${previous[@]:1})
            current=($line)
            sum=()
            i=0; max=${#previous[@]}
            while (( i < max )); do
                sum+=($((${previous[$i]} + ${current[$i]})))
                ((i++))
            done
            data["$project/$service"]="$ts ${sum[*]}"
        else
            data["$project/$service"]="$ts $line"
        fi
    done < <(
        ## Fetch all containers' stats in parallel; each produces one
        ## space-separated line: name, unix ts, memory usage/inactive/
        ## net/limit, then eth0 rx/tx bytes, packets, errors, dropped.
        for container in "$@"; do
            (
                docker:container:stats "${container}" |
                    jq -r '
                 (.name | ltrimstr("/"))
                 + " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
                 + " " + (.memory_stats.usage | tostring)
                 + " " + (.memory_stats.stats.inactive_file | tostring)
                 + " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
                 + " " + (.memory_stats.limit | tostring)
                 + " " + (.networks.eth0.rx_bytes | tostring)
                 + " " + (.networks.eth0.rx_packets | tostring)
                 + " " + (.networks.eth0.rx_errors | tostring)
                 + " " + (.networks.eth0.rx_dropped | tostring)
                 + " " + (.networks.eth0.tx_bytes | tostring)
                 + " " + (.networks.eth0.tx_packets | tostring)
                 + " " + (.networks.eth0.tx_errors | tostring)
                 + " " + (.networks.eth0.tx_dropped | tostring)
                '
            ) &
            jobs=1
        done
        [ -n "$jobs" ] && wait
    )
    for label in "${!data[@]}"; do
        echo "$label ${data[$label]}"
    done
}
  1046. decorator._mangle_fn docker:containers:stats
  1047. export -f docker:containers:stats
  1048. col:normalize:size() {
  1049. local alignment=$1
  1050. awk -v alignment="$alignment" '{
  1051. # Store the entire line in the lines array.
  1052. lines[NR] = $0;
  1053. # Split the line into fields.
  1054. split($0, fields);
  1055. # Update max for each field.
  1056. for (i = 1; i <= length(fields); i++) {
  1057. if (length(fields[i]) > max[i]) {
  1058. max[i] = length(fields[i]);
  1059. }
  1060. }
  1061. }
  1062. END {
  1063. # Print lines with fields padded to max.
  1064. for (i = 1; i <= NR; i++) {
  1065. split(lines[i], fields);
  1066. line = "";
  1067. for (j = 1; j <= length(fields); j++) {
  1068. # Get alignment for the current field.
  1069. align = substr(alignment, j, 1);
  1070. if (align != "+") {
  1071. align = "-"; # Default to left alignment if not "+".
  1072. }
  1073. line = line sprintf("%" align max[j] "s ", fields[j]);
  1074. }
  1075. print line;
  1076. }
  1077. }'
  1078. }
  1079. rrd:create() {
  1080. local prefix="$1"
  1081. shift
  1082. local label="$1" step="300" src_def
  1083. shift
  1084. if [ -z "$VAR_DIR" ]; then
  1085. err "Unset \$VAR_DIR, can't create rrd graph"
  1086. return 1
  1087. fi
  1088. mkdir -p "$VAR_DIR"
  1089. if ! [ -d "$VAR_DIR" ]; then
  1090. err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
  1091. return 1
  1092. fi
  1093. if ! type -p rrdtool >/dev/null 2>&1; then
  1094. apt-get install rrdtool -y --force-yes </dev/null
  1095. if ! type -p rrdtool 2>/dev/null 2>&1; then
  1096. err "Couldn't find nor install 'rrdtool'."
  1097. return 1
  1098. fi
  1099. fi
  1100. local RRD_PATH="$VAR_DIR/rrd"
  1101. local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
  1102. mkdir -p "${RRD_FILE%/*}"
  1103. if [ -f "$RRD_FILE" ]; then
  1104. err "File '$RRD_FILE' already exists, use a different label."
  1105. return 1
  1106. fi
  1107. local rrd_ds_opts=()
  1108. for src_def in "$@"; do
  1109. IFS=":" read -r name type min max rra_types <<<"$src_def"
  1110. rra_types=${rra_types:-average,max,min}
  1111. rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
  1112. done
  1113. local step=120
  1114. local times=( ## with steps 120 is 2mn datapoint
  1115. 2m:1w
  1116. 6m:3w
  1117. 30m:12w
  1118. 3h:1y
  1119. 1d:10y
  1120. 1w:2080w
  1121. )
  1122. rrd_rra_opts=()
  1123. for time in "${times[@]}"; do
  1124. rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
  1125. done
  1126. cmd=(
  1127. rrdtool create "$RRD_FILE" \
  1128. --step "$step" \
  1129. "${rrd_ds_opts[@]}" \
  1130. "${rrd_rra_opts[@]}"
  1131. )
  1132. "${cmd[@]}" || {
  1133. err "Failed command: ${cmd[@]}"
  1134. return 1
  1135. }
  1136. }
rrd:update() {
    ## Feed data lines read on stdin into per-label RRD files, creating
    ## missing files on first use through rrd:create.
    ##
    ## $1: prefix (subdirectory under $VAR_DIR/rrd)
    ## $2..: specs "name|col:create_def[,col:create_def..]" where `col`
    ##       indexes the input line fields (0 = label, 1 = timestamp)
    ##       and `create_def` is handed to rrd:create on file creation.
    ## stdin: lines "label ts value value ..." (read with read-0a).
    local prefix="$1"
    shift
    while read-0a data; do
        [ -z "$data" ] && continue
        ## Split on spaces by temporarily turning them into "~".
        IFS="~" read -ra data <<<"${data// /\~}"
        label="${data[0]}"
        ts="${data[1]}"
        for arg in "$@"; do
            IFS="|" read -r name arg <<<"$arg"
            rrd_label="${label}/${name}"
            rrd_create_opt=()
            rrd_update_opt="$ts"
            for col_def in ${arg//,/ }; do
                col=${col_def%%:*}; create_def=${col_def#*:}
                rrd_update_opt="${rrd_update_opt}:${data[$col]}"
                rrd_create_opt+=("$create_def")
            done
            local RRD_ROOT_PATH="$VAR_DIR/rrd"
            local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
            local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
            if ! [ -f "$RRD_FILE" ]; then
                info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
                if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
                    err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
                    return 1
                fi
            fi
            rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
                err "update failed with options: '$rrd_update_opt'"
                return 1
            }
        done
    done
}
  1172. [ "$SOURCED" ] && return 0
  1173. ##
  1174. ## Command line processing
  1175. ##
  1176. cmdline.spec.gnu
  1177. cmdline.spec.reporting
cmdline.spec.gnu install
cmdline.spec::cmd:install:run() {
    ## `vps install` parent command: no action of its own, the
    ## subcommands (backup, compose-backup, ...) do the work.
    :
}
cmdline.spec.gnu get-type
cmdline.spec::cmd:get-type:run() {
    ## `vps get-type`: print the detected installation type
    ## (e.g. compose, mailcow) as resolved by vps:get-type.
    vps:get-type
}
cmdline.spec:install:cmd:backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optfla: --ignore-domain-check \
               "Allow to bypass the domain check in
                compose file (only used in compose
                installation)."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    ## Dispatch `vps install backup` to the type-specific subcommand
    ## (compose-backup or mailcow-backup), forwarding relevant flags.
    local vps_type
    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "${vps_type}:install-backup"; then
        err "type '${vps_type}' has no backup installation implemented yet."
        return 1
    fi
    opts=()
    [ "$opt_ignore_ping_check" ] &&
        opts+=("--ignore-ping-check")
    ## --ignore-domain-check only exists on the compose subcommand.
    if [ "$vps_type" == "compose" ]; then
        [ "$opt_ignore_domain_check" ] &&
            opts+=("--ignore-domain-check")
    fi
    "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
}
  1211. DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
cmdline.spec.gnu compose-backup
cmdline.spec:install:cmd:compose-backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optval: --service-name,-s "YAML service name in compose
                                  file to check for existence of key.
                                  Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
    : :optval: --compose-file,-f "Compose file location. Defaults to
                                  the value of '\$DEFAULT_COMPOSE_FILE'"
    : :optfla: --ignore-domain-check \
               "Allow to bypass the domain check in
                compose file."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    ## Install the rsync backup client on a compose installation.
    local service_name compose_file
    ## /etc/compose/local.conf may provide DEFAULT_COMPOSE_FILE.
    [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
    compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
    service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
    if ! [ -e "$compose_file" ]; then
        err "Compose file not found in '$compose_file'."
        return 1
    fi
    compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
                           "$opt_ignore_ping_check" "$opt_ignore_domain_check"
}
cmdline.spec:install:cmd:mailcow-backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    ## Install the backup client on a mailcow installation.
    "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
}
cmdline.spec.gnu backup
cmdline.spec::cmd:backup:run() {
    ## `vps backup`: run the backup now, dispatching to the
    ## type-specific implementation (compose or mailcow).
    local vps_type
    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
        err "type '${vps_type}' has no backup process implemented yet."
        return 1
    fi
    "cmdline.spec:backup:cmd:${vps_type}:run"
}
cmdline.spec:backup:cmd:mailcow:run() {
    ## Run the backup now on a mailcow installation: replay the exact
    ## commands installed in /etc/cron.d/{mysql-backup,mirror-dir},
    ## stripped of their trailing log pipe (and of mirror-dir's -q flag
    ## so output stays visible).
    local cmd_line cron_line cmd
    for f in mysql-backup mirror-dir; do
        [ -e "/etc/cron.d/$f" ] || {
            err "Can't find '/etc/cron.d/$f'."
            echo " Have you forgotten to run 'vps install backup BACKUP_HOST' ?" >&2
            return 1
        }
        if ! cron_line=$(cat "/etc/cron.d/$f" |
                             grep -v "^#" | grep "\* \* \*"); then
            err "Can't find cron_line in '/etc/cron.d/$f'." \
                "Have you modified it ?"
            return 1
        fi
        cron_line=${cron_line%|*}      ## drop trailing "| ..." log pipe
        cmd_line=(${cron_line#*root})  ## command starts after the user field
        if [ "$f" == "mirror-dir" ]; then
            cmd=()
            for arg in "${cmd_line[@]}"; do
                [ "$arg" != "-q" ] && cmd+=("$arg")
            done
        else
            cmd=("${cmd_line[@]}")
        fi
        code="${cmd[*]}"
        echo "${WHITE}Launching:${NORMAL} ${code}"
        ## fd gymnastics: stdout lines get a gray "|" prefix, stderr
        ## lines a red "!" prefix, while the eval's own exit code is
        ## re-raised at each level through set_errlvl + PIPESTATUS.
        {
            {
                (
                    ## Some commands are using colors that are already
                    ## set by this current program and will trickle
                    ## down unwantedly
                    ansi_color no
                    eval "${code}"
                ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
                set_errlvl "${PIPESTATUS[0]}"
            } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
            set_errlvl "${PIPESTATUS[0]}"
        } 3>&1 1>&2 2>&3
        if [ "$?" != "0" ]; then
            err "Failed."
            return 1
        fi
    done
    info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
  1299. set_errlvl() { return "${1:-1}"; }
cmdline.spec:backup:cmd:compose:run() {
    ## Run the backup now on a compose installation: re-use the exact
    ## docker command stored in the cron container's crontab (as
    ## extracted by compose:get_cron_docker_cmd).
    local cron_line args
    project_name=$(compose:project_name) || return 1
    docker_cmd=$(compose:get_cron_docker_cmd) || return 1
    echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
    ## fd gymnastics: stdout lines get a gray "|" prefix, stderr lines a
    ## red "!" prefix; the eval's exit code is re-raised via set_errlvl.
    {
        {
            eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
            set_errlvl "${PIPESTATUS[0]}"
        } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
        set_errlvl "${PIPESTATUS[0]}"
    } 3>&1 1>&2 2>&3
    if [ "$?" != "0" ]; then
        err "Failed."
        return 1
    fi
    info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
cmdline.spec.gnu recover-target
cmdline.spec::cmd:recover-target:run() {
    : :posarg: BACKUP_DIR 'Source directory on backup side'
    : :posarg: HOST_DIR 'Target directory on host side'
    : :optval: --backup-host,-B "The backup host"
    : :optfla: --dry-run,-n "Don't do anything, instead tell what it
                            would do."
    ## Recover BACKUP_DIR (relative path on the backup server) into
    ## HOST_DIR, delegating to the type-specific *:recover-target.
    ## if no backup host take the one by default
    backup_host="$opt_backup_host"
    if [ -z "$backup_host" ]; then
        backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
        read -r backup_host ident <<<"$backup_host_ident"
    fi
    if [[ "$BACKUP_DIR" == /* ]]; then
        err "BACKUP_DIR must be a relative path from the root of your backup."
        return 1
    fi
    REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
        err "Can't find HOST_DIR '$HOST_DIR'."
        return 1
    }
    export DRY_RUN="${opt_dry_run}"  ## read by *:recover-target
    backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
}
cmdline.spec.gnu odoo
cmdline.spec::cmd:odoo:run() {
    ## `vps odoo` parent command: subcommands (restart, restore, dump,
    ## drop, ...) do the work.
    :
}
  1346. cmdline.spec.gnu restart
  1347. cmdline.spec:odoo:cmd:restart:run() {
  1348. : :optval: --service,-s "The service (defaults to 'odoo')"
  1349. local out odoo_service
  1350. odoo_service="${opt_service:-odoo}"
  1351. project_name=$(compose:project_name) || return 1
  1352. container_name=$(compose:service:container:get "${project_name}" "${odoo_service}") || return 1
  1353. if ! out=$(docker restart "${container_name}" 2>/dev/null); then
  1354. if [[ "$out" == *"no matching entries in passwd file" ]]; then
  1355. warn "Catched docker bug. Restarting once more."
  1356. if ! out=$(docker restart "${container_name}" 2>&1); then
  1357. err "Can't restart container ${container_name}."
  1358. echo " output:" >&2
  1359. echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
  1360. exit 1
  1361. fi
  1362. else
  1363. err "Couldn't restart container ${container_name} (and no restart bug detected)."
  1364. exit 1
  1365. fi
  1366. fi
  1367. info "Container ${CYAN}${container_name}${NORMAL} was ${DARKGREEN}successfully${NORMAL} restarted."
  1368. if [[ "${container_name}" == "${project_name}_${odoo_service}_run_"* ]]; then
  1369. echo " ${WHITE}Note:${NORMAL} You can re-attach to your container with:" >&2
  1370. echo " docker container attach ${container_name}" >&2
  1371. fi
  1372. }
cmdline.spec.gnu restore
cmdline.spec:odoo:cmd:restore:run() {
    : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
                                  (can be a local file or an url)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    : :optval: --database,-D 'Target database (default if not specified)'
    : :optfla: --neutralize,-n "Restore database in neutralized state."
    : :optfla: --debug,-d "Display more information."
    ## Restore an odoo zip dump (local path or http(s) url): download
    ## and sanity-check the zip, fix container health, drop the target
    ## database, load the dump, then restart odoo.
    local out
    unzip:require "6.0" || return 1
    odoo_service="${opt_service:-odoo}"
    if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
           [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
        settmpdir ZIP_TMP_LOCATION
        tmp_location="$ZIP_TMP_LOCATION/dump.zip"
        curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
            err "Couldn't get '$ZIP_DUMP_LOCATION'."
            exit 1
        }
        ## Quick sanity check: a zip file starts with the "PK" magic.
        if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
            err "Download doesn't seem to be a zip file."
            dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
            exit 1
        fi
        info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
        echo " in '$tmp_location'." >&2
        ZIP_DUMP_LOCATION="$tmp_location"
    fi
    [ -e "$ZIP_DUMP_LOCATION" ] || {
        err "No file '$ZIP_DUMP_LOCATION' found." >&2
        exit 1
    }
    opts_compose=()
    [ -t 1 ] && opts_compose+=("--color")
    [ "$opt_debug" ] && {
        VERBOSE=1
        opts_compose+=("--debug")
    }
    opts_load=()
    [ "$opt_neutralize" ] && opts_load+=("--neutralize")
    unzip:check-integrity "$ZIP_DUMP_LOCATION" || {
        if [ -z "$opt_debug" ]; then
            echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
        fi
        return 1
    }
    project_name=$(compose:project_name) || exit 1
    container_name=$(compose:service:container:get "${project_name}" "${odoo_service}") || exit 1
    info "Found container ${CYAN}${container_name}${NORMAL} for service ${DARKYELLOW}$odoo_service${NORMAL}."
    ## Work around the docker "no matching entries in passwd file" bug
    ## before exercising the container.
    container:health:check-fix:no-matching-entries "${container_name}"
    case "$?" in
        0)
            debug "Container ${CYAN}${container_name}${NORMAL} is ${DARKGREEN}healthy${NORMAL}."
            ;;
        1) err "Container ${CYAN}${container_name}${NORMAL} is not healthy."
           exit 1
           ;;
        2) info "Container ${CYAN}${container_name}${NORMAL} was ${DARKYELLOW}fixed${NORMAL}."
           ;;
    esac
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database is intentionally unquoted: empty means default db.
    Wrap -vsd "drop $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}" -- \
         compose --no-hooks "${opts_compose[@]}" drop "$odoo_service" $opt_database || {
        err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}:"
        [ -z "$opt_debug" ] && {
            echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
        }
        exit 1
    }
    Wrap -vsd "restore $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}" -- \
         compose --no-hooks "${opts_compose[@]}" \
         load "$odoo_service" $opt_database "${opts_load[@]}" < "$ZIP_DUMP_LOCATION" || {
        err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
        [ -z "$opt_debug" ] && {
            echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
        }
        exit 1
    }
    ## Restart odoo, ensure there is no bugs lingering on it.
    cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
}
cmdline.spec.gnu dump
cmdline.spec:odoo:cmd:dump:run() {
    : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
    : :optval: --database,-D 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    : :optfla: --debug,-d "Display debugging information"
    : :optfla: --force,-f "Force the dump even if the file already exists"
    ## Dump the odoo database to DUMP_ZIPFILE, retrying up to 5 times
    ## when the produced zip fails the integrity check.
    odoo_service="${opt_service:-odoo}"
    unzip:require "6.0" || return 1
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    opts_compose=()
    [ -t 1 ] && opts_compose+=("--color")
    [ "$opt_debug" ] && {
        VERBOSE=1
        opts_compose+=("--debug")
    }
    if [ -e "$DUMP_ZIPFILE" ]; then
        if [ -z "$opt_force" ]; then
            err "File '$DUMP_ZIPFILE' already exists."
            echo " Use \`\`--force\`\` (or \`\`-f\`\`) to overwrite it." >&2
            exit 1
        else
            info "Removing previous file '$DUMP_ZIPFILE'."
            rm -f "$DUMP_ZIPFILE" || return 1
        fi
    fi
    max_retry=5
    retry=1
    success=
    while [ "$retry" -le "$max_retry" ]; do
        if [ "$retry" -gt 1 ]; then
            info "Retry $retry/$max_retry to dump ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
        fi
        ## $opt_database intentionally unquoted: empty means default db.
        compose --no-hooks "${opts_compose[@]}" save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
            err "Failed dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
            return 1
        }
        ## A truncated/corrupt zip is deleted and the dump retried.
        unzip:check-integrity "$DUMP_ZIPFILE" || {
            err "Dump produced an invalid zip file '$DUMP_ZIPFILE'. Deleting it."
            if [ -z "$opt_debug" ]; then
                echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
            fi
            rm -f "$DUMP_ZIPFILE"
            ((retry++))
            continue
        }
        success=1
        break
    done
    if [ -z "$success" ]; then
        err "Failed to dump ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
        exit 1
    fi
    info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
}
cmdline.spec.gnu drop
cmdline.spec:odoo:cmd:drop:run() {
    : :optval: --database,-d 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    ## Drop the odoo service's database ($opt_database intentionally
    ## unquoted: empty means the service's default database).
    odoo_service="${opt_service:-odoo}"
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    compose --no-hooks drop "$odoo_service" $opt_database || {
        err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
        exit 1
    }
    info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
}
cmdline.spec.gnu set-cyclos-url
cmdline.spec:odoo:cmd:set-cyclos-url:run() {
    : :optval: --database,-d "Target database ('odoo' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    ## Update odoo's res_company record to point at the cyclos API url
    ## derived from the compose configuration.
    local URL
    dbname=${opt_database:-odoo}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
  compose:psql "$project_name" "$dbname" || {
    err "Failed to set cyclos url value in '$dbname' database."
    exit 1
  }
EOF
}
cmdline.spec.gnu fix-sso
cmdline.spec:odoo:cmd:fix-sso:run() {
    : :optval: --database,-d "Target database ('odoo' if not specified)"
    ## Repoint the website record's user_id to the public user's id
    ## (fixes the website object after e.g. a database restore).
    local public_user_id project_name dbname
    dbname=${opt_database:-odoo}
    project_name=$(compose:project_name) || exit 1
    public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
    Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
echo "UPDATE website SET user_id = $public_user_id;" |
  compose:psql "$project_name" "$dbname" || {
    err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
    exit 1
  }
EOF
}
cmdline.spec.gnu cyclos
cmdline.spec::cmd:cyclos:run() {
    ## `vps cyclos` parent command: subcommands (dump, restore,
    ## set-root-url, unlock) do the work.
    :
}
cmdline.spec:cyclos:cmd:dump:run() {
    : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    ## Dump cyclos' postgres database to DUMP_GZFILE, stopping the
    ## cyclos container for the duration of the dump.
    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    Wrap -d "Dump postgres database '${cyclos_database}'." -- \
         postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu restore
cmdline.spec:cyclos:cmd:restore:run() {
    : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
                                 (can be a local file or an url)'
    : :optval: --service,-s "The service (defaults to 'cyclos')"
    : :optval: --database,-d 'Target database (default if not specified)'
    ## Restore a gzipped cyclos postgres dump (local file or http(s)
    ## url), then remove any database lock and reset the root url
    ## before starting the container again.
    local out
    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
    if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
           [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
        settmpdir GZ_TMP_LOCATION
        tmp_location="$GZ_TMP_LOCATION/dump.gz"
        ## Download then check the gzip magic (1f8b). Unescaped $vars
        ## expand now; the \$(..) construct expands at Wrap run time.
        Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
## Note that curll version before 7.76.0 do not have
curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
    > "$tmp_location" || {
    echo "Error fetching ressource. Is url correct ?" >&2
    exit 1
}
if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
        hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
    err "Download doesn't seem to be a gzip file."
    dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
    exit 1
fi
EOF
        GZ_DUMP_LOCATION="$tmp_location"
    fi
    [ -e "$GZ_DUMP_LOCATION" ] || {
        err "No file '$GZ_DUMP_LOCATION' found." >&2
        exit 1
    }
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    ## XXXvlab: making the assumption that the postgres username should
    ## be the same as the cyclos service selected (which is the default,
    ## but not always the case).
    Wrap -d "restore postgres database '${cyclos_database}'." -- \
         postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
    ## ensure that the database is not locked
    Wrap -d "check and remove database lock if any" -- \
         cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu set-root-url
cmdline.spec:cyclos:cmd:set-root-url:run() {
    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    ## Reset cyclos' stored root url from the compose configuration;
    ## the container is stopped while the database is modified.
    local URL
    cyclos_database=${opt_database:-cyclos}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
  1643. cmdline.spec.gnu unlock
  1644. cmdline.spec:cyclos:cmd:unlock:run() {
  1645. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1646. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1647. local URL
  1648. cyclos_database=${opt_database:-cyclos}
  1649. cyclos_service="${opt_service:-cyclos}"
  1650. project_name=$(compose:project_name) || exit 1
  1651. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1652. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1653. docker stop "$container_id" || exit 1
  1654. Wrap -d "check and remove database lock if any" -- \
  1655. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1656. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1657. docker start "${container_id}" || exit 1
  1658. }
cmdline.spec.gnu rocketchat
## Command group for rocketchat sub-commands: does nothing by itself,
## the work happens in the `cmdline.spec:rocketchat:cmd:*` handlers.
cmdline.spec::cmd:rocketchat:run() {
    :
}
  1663. cmdline.spec.gnu drop-indexes
  1664. cmdline.spec:rocketchat:cmd:drop-indexes:run() {
  1665. : :optval: --database,-d "Target database ('rocketchat' if not specified)"
  1666. : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"
  1667. local URL
  1668. rocketchat_database=${opt_database:-rocketchat}
  1669. rocketchat_service="${opt_service:-rocketchat}"
  1670. project_name=$(compose:project_name) || exit 1
  1671. container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1
  1672. Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1673. docker stop "$container_id" || exit 1
  1674. errlvl=0
  1675. Wrap -d "drop indexes" -- \
  1676. rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
  1677. errlvl=1
  1678. errmsg="Failed to drop indexes"
  1679. }
  1680. Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1681. docker start "${container_id}" || exit 1
  1682. if [ "$errlvl" != 0 ]; then
  1683. err "$errmsg"
  1684. fi
  1685. exit "$errlvl"
  1686. }
cmdline.spec.gnu nextcloud
## Command group for nextcloud sub-commands: does nothing by itself,
## the work happens in the `cmdline.spec:nextcloud:cmd:*` handlers.
cmdline.spec::cmd:nextcloud:run() {
    :
}
  1691. cmdline.spec.gnu upgrade
  1692. cmdline.spec:nextcloud:cmd:upgrade:run() {
  1693. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  1694. : :optval: --service,-s "The nexcloud service name (defaults to 'nextcloud')"
  1695. local URL
  1696. nextcloud_service="${opt_service:-nextcloud}"
  1697. project_name=$(compose:project_name) || exit 1
  1698. containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1
  1699. container_stopped=()
  1700. if [ -n "$containers" ]; then
  1701. for container in $containers; do
  1702. Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
  1703. docker stop "$container" || {
  1704. err "Failed to stop container '$container'."
  1705. exit 1
  1706. }
  1707. container_stopped+=("$container")
  1708. done
  1709. fi
  1710. before_version=$(nextcloud:src:version) || exit 1
  1711. ## -q to remove the display of ``compose`` related information
  1712. ## like relation resolution.
  1713. ## --no-hint to remove the final hint about modifying your
  1714. ## ``compose.yml``.
  1715. compose -q upgrade "$nextcloud_service" --no-hint "$TARGET_VERSION"
  1716. errlvl="$?"
  1717. after_version=$(nextcloud:src:version)
  1718. if [ "$after_version" != "$before_version" ]; then
  1719. desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
  1720. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1721. Wrap -d "$desc" -- \
  1722. compose:file:value-change \
  1723. "${nextcloud_service}.docker-compose.image" \
  1724. "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
  1725. fi
  1726. if [ "$errlvl" == 0 ]; then
  1727. echo "${WHITE}Launching final compose${NORMAL}"
  1728. compose up || exit 1
  1729. fi
  1730. exit "$errlvl"
  1731. }
  1732. cmdline.spec.gnu check-fix
  1733. cmdline.spec::cmd:check-fix:run() {
  1734. : :posarg: [SERVICES...] "Optional service to check"
  1735. : :optval: --check,-c "Specify a check or a list of checks separated by commas"
  1736. : :optfla: --silent,-s "Don't ouput anything if everything goes well"
  1737. local project_name service_name containers container check
  1738. all_checks=$(declare -F |
  1739. egrep '^declare -fx? container:health:check-fix:[^ ]+$' |
  1740. cut -f 4 -d ":")
  1741. checks=(${opt_check//,/ })
  1742. for check in "${checks[@]}"; do
  1743. fn.exists container:health:check-fix:$check || {
  1744. err "check '$check' not found."
  1745. return 1
  1746. }
  1747. done
  1748. if [ "${#checks[*]}" == 0 ]; then
  1749. checks=($all_checks)
  1750. fi
  1751. ## XXXvlab: could make it parallel
  1752. project_name=$(compose:project_name) || exit 1
  1753. containers=($(compose:project:containers "${project_name}")) || exit 1
  1754. found=
  1755. for container in "${containers[@]}"; do
  1756. service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
  1757. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1758. [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
  1759. fi
  1760. found=1
  1761. one_bad=
  1762. for check in "${checks[@]}"; do
  1763. if ! container:health:check-fix:"$check" "$container"; then
  1764. one_bad=1
  1765. fi
  1766. done
  1767. if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
  1768. Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
  1769. Feedback
  1770. fi
  1771. done
  1772. if [ -z "$found" ]; then
  1773. if [ -z "$opt_silent" ]; then
  1774. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1775. warn "No container for given services found in current project '$project_name'."
  1776. else
  1777. warn "No container found for current project '$project_name'."
  1778. fi
  1779. fi
  1780. return 1
  1781. fi
  1782. }
awk:require() {
    ## Ensure GNU awk (gawk) at version >= $1 is available, installing
    ## or upgrading it through apt-get (at most once) if needed.
    ##
    ## Arguments: $1 - minimal required version (e.g. "4.1.4")
    ## Returns: 0 when the requirement is met, 1 otherwise.
    local require_at_least="$1" version already_installed
    while true; do
        if ! version=$(awk --version 2>/dev/null); then
            ## mawk/busybox clones typically fail on `--version`
            version=""
        else
            ## gawk prints e.g. "GNU Awk 5.1.0, API: 3.0 ..." -> "5.1.0"
            version=${version%%,*}
            version=${version##* }
        fi
        if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
            if [ -z "$already_installed" ]; then
                if [ -z "$version" ]; then
                    info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
                else
                    info "Found gawk version '$version'. Updating 'gawk'..."
                fi
                ## </dev/null keeps apt-get from consuming our stdin
                apt-get install gawk -y </dev/null || {
                    err "Failed to install 'gawk'."
                    return 1
                }
                already_installed=true
            else
                ## second pass: install happened but version is still bad
                if [ -z "$version" ]; then
                    err "No 'gawk' available even after having installed one"
                else
                    err "'gawk' version '$version' is lower than required" \
                        "'$require_at_least' even after updating 'gawk'."
                fi
                return 1
            fi
            continue  ## re-check the version after installing
        fi
        return 0
    done
}
unzip:require() {
    ## Ensure Info-ZIP `unzip` at version >= $1 is available, installing
    ## or upgrading it through apt-get (at most once) if needed.
    ##
    ## Arguments: $1 - minimal required version (e.g. "6.00")
    ## Returns: 0 when the requirement is met, 1 otherwise.
    local require_at_least="$1" version already_installed
    while true; do
        if ! version=$(unzip -v 2>/dev/null); then
            version=""
        else
            ## Version is in the first line, here's an example:
            ##
            ##   UnZip 6.00 of 20 April 2009, by Debian. Original by Info-ZIP.
            version=${version##UnZip }
            version=${version%% of*}
        fi
        if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
            if [ -z "$already_installed" ]; then
                if [ -z "$version" ]; then
                    info "No 'unzip' available, probably using a clone. Installing 'unzip'..."
                else
                    info "Found unzip version '$version'. Updating 'unzip'..."
                fi
                ## </dev/null keeps apt-get from consuming our stdin
                apt-get install unzip -y </dev/null || {
                    err "Failed to install 'unzip'."
                    return 1
                }
                already_installed=true
            else
                ## second pass: install happened but version is still bad
                if [ -z "$version" ]; then
                    err "No 'unzip' available even after having installed one"
                else
                    err "'unzip' version '$version' is lower than required" \
                        "'$require_at_least' even after updating 'unzip'."
                fi
                return 1
            fi
            continue  ## re-check the version after installing
        fi
        return 0
    done
}
unzip:check-integrity() {
    ## Check that the given zip archive is not corrupted (via `unzip -t`).
    ##
    ## Arguments: $1 - path to the zip file
    ## Returns: 0 when the archive tests clean, 1 otherwise.
    ## NOTE(review): `out` and `last_line` are not declared local and
    ## thus leak into the caller's scope — confirm this is intended.
    local file="$1"
    if ! out=$(unzip -t "$file" 2>&1); then
        ## remove OK files
        out=$(printf "%s\n" "$out" | egrep -v "^\s+testing: .*\s+OK$")
        ## if last line is not "No errors detected in compressed data of $file."
        ## then it's an error
        ## (unzip may exit non-zero for mere warnings, hence this rescue)
        last_line=${out##*$'\n'}
        if [ "$last_line" == "No errors detected in compressed data of $file." ]; then
            return 0
        fi
        err "Zip file '$file' failed integrity check."
        if [ -n "$VERBOSE" ]; then
            ## dump unzip's (filtered) diagnostics for debugging
            printf "%s\n" "$out" | prefix " ${GRAY}|${NORMAL} " >&2
        fi
        return 1
    fi
    return 0
}
  1875. resource:list() {
  1876. declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.-]+$' | cut -f 3- -d " " | cut -f 2- -d ":"
  1877. }
cmdline.spec.gnu stats
## Collect and display host/containers statistics for every requested
## resource (each implemented by a `stats:<resource>` function), also
## feeding the values into the RRD databases as a side effect.
cmdline.spec::cmd:stats:run() {

    : :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
    : :optfla: --silent,-s "Shorthand for '--format silent'"
    : :optval: --resource,-r "resource(s) separated with a comma
(available resources: $(resource:list))"

    local project_name service_name containers container check

    ## --silent is only a shorthand, so it conflicts with an explicit --format
    if [[ -n "${opt_silent}" ]]; then
        if [[ -n "${opt_format}" ]]; then
            err "'--silent' conflict with option '--format'."
            return 1
        fi
        opt_format=s
    fi
    opt_format="${opt_format:-pretty}"
    ## normalize shorthand values to their long form
    case "${opt_format}" in
        raw|r)
            opt_format="raw"
            :
            ;;
        silent|s)
            opt_format="silent"
            ;;
        pretty|p)
            opt_format="pretty"
            ## pretty rendering relies on gawk features (see stats:* fns)
            awk:require 4.1.4 || return 1
            ;;
        *)
            err "Invalid value '$opt_format' for option --format"
            echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
            return 1
    esac

    ## default resource set; note 'disk-2' (new implementation), not 'disk'
    local resources=(c.{memory,network} load_avg disk-2)
    if [ -n "${opt_resource}" ]; then
        ## intentional word-splitting: comma-separated list to array
        resources=(${opt_resource//,/ })
    fi

    ## validate all requested resources before running any of them
    local not_found=()
    for resource in "${resources[@]}"; do
        if ! fn.exists "stats:$resource"; then
            not_found+=("$resource")
        fi
    done
    if [[ "${#not_found[@]}" -gt 0 ]]; then
        not_found_msg=$(printf "%s, " "${not_found[@]}")
        not_found_msg=${not_found_msg%, }
        err "Unsupported resource(s) provided: ${not_found_msg}"
        echo " resource must be one-of:" >&2
        resource:list | prefix " - " >&2
        return 1
    fi

    :state-dir:
    for resource in "${resources[@]}"; do
        if [ "$opt_format" == "pretty" ]; then
            echo "${WHITE}$resource${NORMAL}:"
            stats:"$resource" "$opt_format" 2>&1 | prefix " "
        else
            stats:"$resource" "$opt_format" 2>&1 | prefix "$resource "
        fi
        ## propagate the stats function's status, not prefix's
        set_errlvl "${PIPESTATUS[0]}" || return 1
    done
}
stats:c.memory() {
    ## Collect per-container memory stats, store them in the 'containers'
    ## RRD database, and print them in the requested format.
    ##
    ## Arguments: $1 - output format: raw|r or pretty|p (default pretty)
    local format="$1"
    local out
    container_to_check=($(docker:running_containers)) || exit 1
    out=$(docker:containers:stats "${container_to_check[@]}")
    ## fields 3 (usage) and 4 (inactive) are recorded as gauges
    printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            printf "%s\n" "$out" | cut -f 1-5 -d " "
            ;;
        pretty|p)
            ## pretty path needs gawk (strftime)
            awk:require 4.1.4 || return 1
            {
                ## underscores are placeholders turned into spaces by header:make
                echo "container" "__total____" "buffered____" "resident____"
                printf "%s\n" "$out" |
                    awk '
                    {
                        offset = strftime("%z", $2);
                        print $1, substr($0, index($0,$3));
                    }' | cut -f 1-4 -d " " |
                    numfmt --field 2-4 --to=iec-i --format=%8.1fB |
                    ## glue number and unit with ':' so column alignment
                    ## treats them as one field; split back after
                    sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
                    sort
            } | col:normalize:size -+++ |
                sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
                header:make
            ;;
    esac
}
stats:c.network() {
    ## Collect per-container network counters (rx/tx bytes, packets,
    ## errors, dropped), store them in the 'containers' RRD database and
    ## print them in the requested format.
    ##
    ## Arguments: $1 - output format: raw|r or pretty|p (default pretty)
    local format="$1"
    local out
    container_to_check=($(docker:running_containers)) || exit 1
    out=$(docker:containers:stats "${container_to_check[@]}")
    cols=(
        {rx,tx}_{bytes,packets,errors,dropped}
    )
    idx=5 ## starting column idx for next fields
    ## build the "idx:name:COUNTER:U:U" RRD field definitions
    defs=()
    for col in "${cols[@]}"; do
        defs+=("$((idx++)):${col}:COUNTER:U:U")
    done
    ## join the defs array with commas (IFS drives "${defs[*]}")
    OLDIFS="$IFS"
    IFS="," defs="${defs[*]}"
    IFS="$OLDIFS"
    printf "%s\n" "$out" |
        rrd:update "containers" \
            "network|${defs}" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            printf "%s\n" "$out" | cut -f 1,2,7- -d " "
            ;;
        pretty|p)
            ## pretty path needs gawk (strftime)
            awk:require 4.1.4 || return 1
            {
                ## two header lines: group titles (RX/TX) then column names;
                ## underscores become spaces in header:make
                echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
                echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
                printf "%s\n" "$out" |
                    awk '
                    {
                        offset = strftime("%z", $2);
                        print $1, substr($0, index($0,$7));
                    }' |
                    numfmt --field 2,6 --to=iec-i --format=%8.1fB |
                    numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
                    ## glue number and unit with ':' for column alignment
                    sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
                    sort
            } | col:normalize:size -++++++++ |
                sed -r '
                    s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                    s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                    s/ ([0-9]+)\.0:B/\1 /g;
                    s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make 2
            ;;
    esac
}
  2021. header:make() {
  2022. local nb_line="${1:-1}"
  2023. local line
  2024. while ((nb_line-- > 0)); do
  2025. read-0a line
  2026. echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
  2027. done
  2028. cat
  2029. }
stats:load_avg() {
    ## Collect the host's 1/5/15 min load averages, store them in the
    ## host RRD database and print them in the requested format.
    ##
    ## Arguments: $1 - output format: raw|r or pretty|p (default pretty)
    local format="$1"
    local out
    out=$(host:sys:load_avg)
    ## fields 2-4 of the sample line are the three load average gauges
    printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            printf "%s\n" "$out" | cut -f 2-5 -d " "
            ;;
        pretty|p)
            {
                ## underscores are placeholders turned into spaces by header:make
                echo "___1m" "___5m" "__15m"
                printf "%s\n" "$out" | cut -f 3-5 -d " "
            } | col:normalize:size +++ | header:make
            ;;
    esac
}
stats:disk() {
    ## Collect root filesystem used/size (1K blocks), store them in the
    ## host RRD database and print them in the requested format.
    ## NOTE(review): superseded by stats:disk-2 in the default resource
    ## set of the `stats` command.
    ##
    ## Arguments: $1 - output format: raw|r or pretty|p (default pretty)
    local format="$1"
    local out
    disk_used_size=$(df --output=used,size / | tail -n 1) || return 1
    ## NOTE(review): `${disk_used_size// / }` replaces each space with a
    ## space — a no-op as written; presumably meant to squeeze df's
    ## column padding. TODO confirm against the intended sample format.
    out=$(printf "%s " "" "$(date +%s)" "${disk_used_size// / }")
    printf "%s\n" "$out" | rrd:update "" "disk|2:used:GAUGE:U:U,3:size:GAUGE:U:U" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            printf "%s\n" "$out" | cut -f 2-4 -d " "
            ;;
        pretty|p)
            {
                ## underscores are placeholders turned into spaces by header:make
                echo "__used" "__size"
                printf "%s\n" "$out" | cut -f 3-5 -d " " |
                    numfmt --field 1-2 --from-unit=1024 --to=iec-i --format=%8.1fB
            } | col:normalize:size ++ | header:make
            ;;
    esac
}
stats:disk-2() {
    ## Collect detailed disk usage datapoints (host aggregates, datastore
    ## folders, docker categories) via disk:usage:stats, store them in
    ## the 'disk-2' RRD database and print a 4-section pretty report or
    ## the raw sample lines.
    ##
    ## Arguments: $1 - output format: raw|r or pretty|p (default pretty)
    local format="$1"
    local out
    out=$(disk:usage:stats)
    printf "%s\n" "$out" | rrd:update "disk-2" "size|2:size:GAUGE:U:U" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            printf "%s\n" "$out"
            ;;
        pretty|p)
            awk:require 4.1.4 || return 1
            ## Each section below follows the same recipe: filter the
            ## relevant "ident epoch size" lines, keep ident+size,
            ## humanize sizes (numfmt), glue number and unit with ':' so
            ## col:normalize:size aligns them as one column, split them
            ## back apart, and render with a gray header.
            echo "${WHITE}host:${NORMAL}"
            echo " ${WHITE}general:${NORMAL}"
            ## host aggregates: only the used/available totals
            {
                echo "ident" "size____"
                printf "%s\n" "$out" | grep ^host/ | sed -r 's%^host/%%g' |
                    egrep "^(used|available) " |
                    cut -f 1,3 -d " " |
                    numfmt --field 2-4 --to=iec-i --from-unit=1024 --format=%8.1fB |
                    sed -r 's/(\.[0-9])([A-Z]?i?B)/\1:\2/g'
            } | col:normalize:size -+ |
                sed -r '
                    s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                    s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                    s/ ([0-9]+)\.0:B/\1 B/g;
                    s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make |
                prefix " "
            echo " ${WHITE}usage:${NORMAL}"
            ## host usage breakdown (docker/data/other), largest first
            {
                echo "ident" "size____"
                printf "%s\n" "$out" | grep ^host/ | sed -r 's%^host/%%g' |
                    egrep -v "^(used|available) " |
                    sort -rn -k 3 |
                    cut -f 1,3 -d " " |
                    numfmt --field 2-4 --to=iec-i --from-unit=1024 --format=%8.1fB |
                    sed -r 's/(\.[0-9])([A-Z]?i?B)/\1:\2/g'
            } | col:normalize:size -+ |
                sed -r '
                    s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                    s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                    s/ ([0-9]+)\.0:B/\1 B/g;
                    s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make |
                prefix " "
            echo "${WHITE}data:${NORMAL}"
            ## datastore folders, largest first
            {
                echo "folder" "size____"
                printf "%s\n" "$out" | grep ^data/ | sed -r 's%^data/%%g' |
                    sort -rn -k 3 |
                    cut -f 1,3 -d " " |
                    numfmt --field 2 --to=iec-i --from-unit=1024 --format=%8.1fB |
                    sed -r 's/(\.[0-9])([A-Z]?i?B)/\1:\2/g'
            } | col:normalize:size -+ |
                sed -r '
                    s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                    s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                    s/ ([0-9]+)\.0:B/\1 B/g;
                    s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make |
                prefix " "
            echo "${WHITE}docker:${NORMAL}"
            ## docker categories, excluding the "_r" (reclaimable) lines
            {
                echo "ident" "size____"
                printf "%s\n" "$out" | grep ^docker/ | sed -r 's%^docker/%%g' |
                    egrep -v "^[a-z0-9_]+_r " |
                    sort -rn -k 3 |
                    cut -f 1,3 -d " " |
                    numfmt --field 2-4 --to=iec-i --from-unit=1024 --format=%8.1fB |
                    sed -r 's/(\.[0-9])([A-Z]?i?B)/\1:\2/g'
            } | col:normalize:size -+ |
                sed -r '
                    s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                    s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                    s/ ([0-9]+)\.0:B/\1 B/g;
                    s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make |
                prefix " "
            ;;
    esac
}
  2157. disk:total() {
  2158. local disk_used_size
  2159. disk_used_size=$(df --output=used,size "/") || return 1
  2160. disk_used_size=${disk_used_size##*$'\n'}
  2161. disk_used_size=${disk_used_size// / }
  2162. printf "%s" "$disk_used_size"
  2163. }
disk:usage:stats() {
    ## Emit one "<ident> <epoch> <size-in-KiB>" line per disk usage
    ## datapoint:
    ##   data/<folder>   - per-folder usage of /srv/datastore/data
    ##   docker/logs     - total size of container json logs
    ##   docker/<type>   - `docker system df` types (and <type>_r for
    ##                     the reclaimable part)
    ##   host/*          - used/available totals and docker/data/other
    ##                     aggregates
    local disk_used_size
    data_sum=0
    ## NUL-separated `du` output: "<size>\t<path>" records
    while read-0 size_name; do
        name=${size_name#*$'\t'}
        [ -d "$name" ] || continue
        size=${size_name%%$'\t'*}
        name=${name##*/}
        data_sum=$((data_sum + size))
        printf "%s %s %s\n" "data/${name##*/}" "$(date +%s)" "$size"
    done < <(du -s0 /srv/datastore/data/*)
    docker_sum=0
    if ! out=$(du -s /var/lib/docker/containers/*/*-json.log); then
        err "Failed to get disk usage of docker container logs."
        return 1
    fi
    ## sum the per-file `du` sizes (first column)
    total_usage_logs=$(printf "%s" "$out" | awk '{s+=$1} END {print s}')
    docker_sum=$((docker_sum + total_usage_logs))
    printf "%s %s %s\n" "docker/logs" "$(date +%s)" "$total_usage_logs"
    if ! out=$(docker system df --format '{{.Type}}\t{{.Size}}\t{{.Reclaimable}}'); then
        err "Failed to get disk usage of docker system."
        return 1
    fi
    date=$(date +%s)
    ## the subshell below re-emits docker's human sizes as NUL-separated
    ## "<type> <size_kb> <reclaimable_kb>" triplets
    while read-0 ident size reclaimable; do
        docker_sum=$((docker_sum + size))
        printf "%s %s %s\n" "docker/$ident" "$date" "$size"
        printf "%s %s %s\n" "docker/${ident}_r" "$date" "$reclaimable"
    done < <(
        printf "%s" "$out" |
            ## strip the "(NN%)" suffix of Reclaimable, keep value+unit
            sed -r 's/^(.*)\t([0-9.]+)([kMGT]?B)\t([0-9.]+)([kMGT]?B).*$/\1\t\2\t\3\t\4\t\5/g' |
            awk -F'\t' '
            # Function to convert sizes to kilobytes
            function convert_to_kb(value, unit) {
                if (unit == "B") {
                    return value / 1024;
                } else if (unit == "kB") {
                    return value;
                } else if (unit == "MB") {
                    return value * 1024;
                } else if (unit == "GB") {
                    return value * 1024 * 1024;
                } else if (unit == "TB") {
                    return value * 1024 * 1024 * 1024;
                }
                return 0;
            }
            {
                size_value = $2;
                size_unit = $3;
                reclaimable_value = $4;
                reclaimable_unit = $5;
                type = $1;
                # Convert type to lowercase and replace spaces with underscores
                type=tolower(type);
                gsub(/ /, "_", type);
                # Convert size and reclaimable to kilobytes
                size_kb = convert_to_kb(size_value, size_unit);
                reclaimable_kb = convert_to_kb(reclaimable_value, reclaimable_unit);
                printf "%s\0%d\0%d\0", type, size_kb, reclaimable_kb;
            }'
    )
    ## host aggregates, derived from the root filesystem totals
    used_size=$(disk:total)
    used=${used_size%% *}
    available=${used_size##* }
    date=$(date +%s)
    printf "%s %s %s\n" "host/used" "$date" "${used}"
    printf "%s %s %s\n" "host/available" "$date" "${available}"
    printf "%s %s %s\n" "host/docker" "$date" "$docker_sum"
    printf "%s %s %s\n" "host/data" "$date" "$data_sum"
    printf "%s %s %s\n" "host/other" "$date" "$((used - docker_sum - data_sum))"
}
  2236. host:sys:load_avg() {
  2237. local uptime
  2238. uptime="$(uptime)"
  2239. uptime=${uptime##*: }
  2240. uptime=${uptime//,/}
  2241. printf "%s " "" "$(date +%s)" "$uptime"
  2242. }
cmdline.spec.gnu mongo
## Command group for mongo sub-commands: does nothing by itself, the
## work happens in the `cmdline.spec:mongo:cmd:*` handlers.
cmdline.spec::cmd:mongo:run() {
    :
}
  2247. cmdline.spec.gnu upgrade
  2248. cmdline.spec:mongo:cmd:upgrade:run() {
  2249. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  2250. : :optval: --service,-s "The mongo service name (defaults to 'mongo')"
  2251. : :optfla: --debug,-d "Display debugging information"
  2252. local URL
  2253. mongo_service="${opt_service:-mongo}"
  2254. available_actions=$(compose --get-available-actions) || exit 1
  2255. available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
  2256. if [[ " ${available_actionable_services[*]} " != *" $mongo_service "* ]]; then
  2257. err "Service '$mongo_service' was not found in current 'compose.yml'."
  2258. exit 1
  2259. fi
  2260. opts_compose=()
  2261. if [ -n "$opt_debug" ]; then
  2262. opts_compose+=("--debug")
  2263. else
  2264. opts_compose+=("-q")
  2265. fi
  2266. project_name=$(compose:project_name) || exit 1
  2267. containers="$(compose:service:containers "${project_name}" "${mongo_service}")" || exit 1
  2268. ## XXXvlab: quick hack, to make more beautiful later
  2269. cron_container=$(compose:service:containers "${project_name}" "cron")
  2270. containers="$containers $cron_container"
  2271. docker stop "$cron_container" >/dev/null 2>&1 || true
  2272. before_version=
  2273. uptodate=
  2274. upgraded=
  2275. msgerr=()
  2276. while read-0a-err errlvl line; do
  2277. echo "$line"
  2278. rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
  2279. case "$rline" in
  2280. "II Current mongo version: "*)
  2281. before_version="${rline#II Current mongo version: }"
  2282. ;;
  2283. "II ${mongo_service} is already up-to-date.")
  2284. if [ -z "$before_version" ]; then
  2285. msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
  2286. continue
  2287. fi
  2288. after_version="$before_version"
  2289. uptodate=1
  2290. ;;
  2291. "II Successfully upgraded from ${before_version} to "*)
  2292. after_version="${rline#II Successfully upgraded from ${before_version} to }"
  2293. upgraded=1
  2294. ;;
  2295. *)
  2296. :
  2297. ;;
  2298. esac
  2299. done < <(
  2300. ## -q to remove the display of ``compose`` related information
  2301. ## like relation resolution.
  2302. ## -c on the upgrade action to force color
  2303. ansi_color=yes p-0a-err compose -c "${opts_compose[@]}" upgrade "$mongo_service" --no-hint -c "$TARGET_VERSION"
  2304. )
  2305. if [ "$errlvl" != 0 ]; then
  2306. exit "$errlvl"
  2307. fi
  2308. if [ -n "$uptodate" ]; then
  2309. for container in "${containers[@]}"; do
  2310. [ -n "$container" ] || continue
  2311. Wrap -d "start ${DARKYELLOW}${mongo_service}${NORMAL}'s container" -- \
  2312. docker start "$container" || {
  2313. err "Failed to start container '$container'."
  2314. exit 1
  2315. }
  2316. done
  2317. exit 0
  2318. fi
  2319. if [ -z "$upgraded" ]; then
  2320. err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
  2321. exit 1
  2322. fi
  2323. desc="update \`compose.yml\` to set ${DARKYELLOW}$mongo_service${NORMAL}'s "
  2324. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  2325. Wrap -d "$desc" -- \
  2326. compose:file:value-change \
  2327. "${mongo_service}.docker-compose.image" \
  2328. "docker.0k.io/mongo:${after_version}-myc" || exit 1
  2329. echo "${WHITE}Launching final compose${NORMAL}"
  2330. compose up || exit 1
  2331. }
cmdline.spec.gnu postgres
## Command group for postgres sub-commands: does nothing by itself, the
## work happens in the `cmdline.spec:postgres:cmd:*` handlers.
cmdline.spec::cmd:postgres:run() {
    :
}
  2336. cmdline.spec.gnu upgrade
  2337. cmdline.spec:postgres:cmd:upgrade:run() {
  2338. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  2339. : :optval: --service,-s "The postgre service name (defaults to 'postgres')"
  2340. : :optfla: --debug,-d "Display debugging information"
  2341. local URL
  2342. depends yq
  2343. postgres_service="${opt_service:-postgres}"
  2344. available_actions=$(compose --get-available-actions) || exit 1
  2345. available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
  2346. if [[ " ${available_actionable_services[*]} " != *" $postgres_service "* ]]; then
  2347. err "Service '$postgres_service' was not found in current 'compose.yml'."
  2348. exit 1
  2349. fi
  2350. opts_compose=()
  2351. if [ -n "$opt_debug" ]; then
  2352. opts_compose+=("--debug")
  2353. else
  2354. opts_compose+=("-q")
  2355. fi
  2356. project_name=$(compose:project_name) || exit 1
  2357. containers=($(compose:service:containers "${project_name}" "${postgres_service}")) || exit 1
  2358. ## XXXvlab: quick hack, to make more beautiful later
  2359. cron_container=$(compose:service:containers "${project_name}" "cron")
  2360. containers+=("$cron_container")
  2361. docker stop "$cron_container" >/dev/null 2>&1 || true
  2362. before_version=
  2363. uptodate=
  2364. upgraded=
  2365. msgerr=()
  2366. while read-0a-err errlvl line; do
  2367. echo "$line"
  2368. rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
  2369. case "$rline" in
  2370. "II Current postgres version: "*)
  2371. before_version="${rline#II Current postgres version: }"
  2372. ;;
  2373. "II ${postgres_service} is already up-to-date.")
  2374. if [ -z "$before_version" ]; then
  2375. msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
  2376. continue
  2377. fi
  2378. after_version="$before_version"
  2379. uptodate=1
  2380. ;;
  2381. "II Successfully upgraded from ${before_version} to "*)
  2382. after_version="${rline#II Successfully upgraded from ${before_version} to }"
  2383. upgraded=1
  2384. ;;
  2385. *)
  2386. :
  2387. ;;
  2388. esac
  2389. done < <(
  2390. ## -q to remove the display of ``compose`` related information
  2391. ## like relation resolution.
  2392. ## -c on the upgrade action to force color
  2393. ansi_color=yes p-0a-err compose -q -c "${opts_compose[@]}" upgrade "$postgres_service" --no-hint -c "$TARGET_VERSION" 2>&1
  2394. )
  2395. if [ "$errlvl" != 0 ]; then
  2396. exit "$errlvl"
  2397. fi
  2398. if [ -n "$uptodate" ]; then
  2399. for container in "${containers[@]}"; do
  2400. [ -n "$container" ] || continue
  2401. Wrap -d "start ${DARKYELLOW}${postgres_service}${NORMAL}'s container" -- \
  2402. docker start "$container" || {
  2403. err "Failed to start container '$container'."
  2404. exit 1
  2405. }
  2406. done
  2407. exit 0
  2408. fi
  2409. if [ -z "$upgraded" ]; then
  2410. err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
  2411. exit 1
  2412. fi
  2413. desc="update \`compose.yml\` to set ${DARKYELLOW}$postgres_service${NORMAL}'s "
  2414. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  2415. Wrap -d "$desc" -- \
  2416. compose:file:value-change \
  2417. "${postgres_service}.docker-compose.image" \
  2418. "docker.0k.io/postgres:${after_version}-myc" || exit 1
  2419. echo "${WHITE}Launching final compose${NORMAL}"
  2420. compose up || exit 1
  2421. }
  2422. cmdline.spec.gnu bench
  2423. cmdline.spec::cmd:bench:run() {
  2424. depends sysbench
  2425. nbthread=$(lscpu | egrep "^CPU\(s\):" | cut -f 2 -d : | xargs echo)
  2426. single=$(sysbench cpu --cpu-max-prime=20000 run --threads=1 | grep "events per" | cut -f 2 -d : | xargs echo)
  2427. threaded=$(sysbench cpu --cpu-max-prime=20000 run --threads="$nbthread" | grep "events per" | cut -f 2 -d : | xargs echo)
  2428. echo "$threaded / $single / $nbthread"
  2429. }
## Command group for monujo sub-commands: does nothing by itself, the
## work happens in the `cmdline.spec:monujo:cmd:*` handlers.
cmdline.spec::cmd:monujo:run() {
    :
}
## NOTE(review): every other command pair in this file puts
## `cmdline.spec.gnu <name>` BEFORE the matching handler; here the
## `cmdline.spec.gnu monujo` line sits after the group handler and just
## before the set-version handler — it looks like it should read
## `cmdline.spec.gnu set-version` (with a `cmdline.spec.gnu monujo`
## before the group handler above). Confirm against the cmdline shlib
## before reordering.
cmdline.spec.gnu monujo
  2434. cmdline.spec:monujo:cmd:set-version:run() {
  2435. : :posarg: TARGET_VERSION "Target version to put in options"
  2436. : :optval: --service,-s "The monujo service name (defaults to 'monujo')"
  2437. local URL
  2438. monujo_service="${opt_service:-monujo}"
  2439. project_name=$(compose:project_name) || exit 1
  2440. ## check if service exists in compose.yml
  2441. compose:service:exists "$project_name" "$monujo_service" || {
  2442. err "Service '$monujo_service' was not found in current 'compose.yml'."
  2443. exit 1
  2444. }
  2445. Wrap -d "Changing ${DARKYELLOW}$monujo_service${NORMAL} version" -- \
  2446. compose:file:value-change \
  2447. "${monujo_service}.options.version" \
  2448. "${TARGET_VERSION}" || exit 1
  2449. }
  2450. cmdline::parse "$@"