You can not select more than 25 topics Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.

2398 lines
72 KiB

  1. #!/bin/bash
  2. . /etc/shlib >/dev/null 2>&1 || {
  3. echo "Error: you don't have kal-shlib-core installed."
  4. echo ""
  5. echo " You might want to add `deb.kalysto.org` deb repository, you'll need root access,"
  6. echo " so you might want to run these command after a \`sudo -i\` for instance..."
  7. echo ""
  8. echo " echo deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main \\"
  9. echo " > /etc/apt/sources.list.d/kalysto.org.list"
  10. echo " wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add -"
  11. echo " apt-get update -o Dir::Etc::sourcelist=sources.list.d/kalysto.org.list \\"
  12. echo " -o Dir::Etc::sourceparts=- -o APT::Get::List-Cleanup=0"
  13. echo ""
  14. echo " Then install package kal-shlib-*:"
  15. echo ""
  16. echo " apt install kal-shlib-{common,cmdline,config,cache,docker,pretty}"
  17. echo ""
  18. exit 1
  19. } >&2
  20. include common
  21. include parse
  22. include cmdline
  23. include config
  24. include cache
  25. include fn
  26. include docker
  27. [[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
  28. version=0.1
  29. desc='Install backup'
  30. help=""
  31. version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
## Fill each variable named after the first argument with one record read
## from stdin, and store in the variable named by $1 the final errorlevel
## emitted at end of stream by ``p-0a-err`` (presumably '\0a'-delimited
## records from the shlib read helpers -- TODO confirm exact framing).
## Returns non-zero when input ended prematurely (errorlevel forced to 127)
## or when the trailing record was not a number (forced to 126).
read-0a-err() {
    local ret="$1" eof="" idx=0 last=
    ## default errorlevel: 0
    read -r -- "${ret?}" <<<"0"
    shift
    while [ "$1" ]; do
        last=$idx
        read -r -- "$1" || {
            ## Put this last value in ${!ret}
            eof="$1"
            read -r -- "$ret" <<<"${!eof}"
            break
        }
        ((idx++))
        shift
    done
    [ -z "$eof" ] || {
        if [ "$last" != 0 ]; then
            echo "Error: read-0a-err couldn't fill all value" >&2
            read -r -- "$ret" <<<"127"
        else
            if [ -z "${!ret}" ]; then
                echo "Error: last value is not a number, did you finish with an errorlevel ?" >&2
                read -r -- "$ret" <<<"126"
            fi
        fi
        false
    }
}
  60. p-0a-err() {
  61. "$@"
  62. echo -n "$?"
  63. }
## List the distinct compose project names having at least one running
## container.  Result is session-cached by the ``:cache:`` decorator below
## (decorator._mangle_fn rewrites the function -- do not restructure).
docker:running-container-projects() {
    :cache: scope=session
    docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
}
decorator._mangle_fn docker:running-container-projects
  69. ssh:mk-private-key() {
  70. local host="$1" service_name="$2"
  71. (
  72. settmpdir VPS_TMPDIR
  73. ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
  74. cat "$VPS_TMPDIR/rsync_rsa"
  75. )
  76. }
  77. mailcow:has-images-running() {
  78. local images
  79. images=$(docker ps --format '{{.Image}}' | sort | uniq)
  80. [[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
  81. }
  82. mailcow:has-container-project-mentionning-mailcow() {
  83. local projects
  84. projects=$(docker:running-container-projects) || return 1
  85. [[ $'\n'"$projects"$'\n' == *mailcow* ]]
  86. }
  87. mailcow:has-running-containers() {
  88. mailcow:has-images-running ||
  89. mailcow:has-container-project-mentionning-mailcow
  90. }
## Locate the mailcow-dockerized root directory among known install
## locations (a readable mailcow.conf is required).  Session-cached via
## the ``:cache:`` decorator (do not restructure the body).
mailcow:get-root() {
    :cache: scope=session
    local dir
    for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
        [ -d "$dir" ] || continue
        [ -r "$dir/mailcow.conf" ] || continue
        echo "$dir"
        return 0
    done
    return 1
}
decorator._mangle_fn mailcow:get-root
## Print the path of the compose.yml used by the ``compose`` command,
## scraped from its DEBUG/DRY_RUN trace output (the HOST_COMPOSE_YML_FILE
## docker -e argument).  Fails when the file does not exist.
## Session-cached via the ``:cache:`` decorator.
compose:get-compose-yml() {
    :cache: scope=session
    local path
    path=$(DEBUG=1 DRY_RUN=1 compose 2>&1 | egrep '^\s+-e HOST_COMPOSE_YML_FILE=' | cut -f 2- -d "=" | cut -f 1 -d " ")
    [ -e "$path" ] || return 1
    echo "$path"
}
decorator._mangle_fn compose:get-compose-yml
export -f compose:get-compose-yml
  112. compose:has-container-project-myc() {
  113. local projects
  114. projects=$(docker:running-container-projects) || return 1
  115. [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
  116. }
  117. compose:file:value-change() {
  118. local key="$1" value="$2"
  119. local compose_yml
  120. if ! compose_yml=$(compose:get-compose-yml); then
  121. err "Couldn't locate your 'compose.yml' file."
  122. return 1
  123. fi
  124. yaml:file:value-change "$compose_yml" "$key" "$value" || return 1
  125. }
  126. export -f compose:file:value-change
## Set dotted key $2 of YAML file $1 to value $3 while preserving the
## file's formatting/comments: a yq-edited copy is diffed against the
## original and only the single hunk touching the key is applied via
## ``patch``.  Refuses to apply zero or multiple matching hunks.
## NOTE(review): the trailing ``|| exit 1`` aborts the whole script (not
## just this function) on failure -- TODO confirm this is intended.
yaml:file:value-change() {
    local file="$1" key="$2" value="$3" first=1 count=0 diff=""
    (
        cd "${file%/*}"
        ## hunks arrive NUL-separated; the first record is the diff header
        while read-0 hunk; do
            if [ -n "$first" ]; then
                diff+="$hunk"
                first=
                continue
            fi
            ## keep only hunks that ADD a line for the key's last component
            if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
                ((count++))
                ## NOTE(review): the ``>&2`` on this assignment is a no-op
                diff+="$hunk" >&2
            else
                :
                # echo "discarding:" >&2
                # e "$hunk" | prefix " | " >&2
            fi
        done < <(
            export DEBUG=
            settmpdir YQ_TEMP
            ## edit a copy with yq, put each top-level key in its own
            ## paragraph, then diff; '@@' hunk headers get a NUL prefix so
            ## read-0 can split the stream into hunks
            cp "${file}" "$YQ_TEMP/compose.yml" &&
                yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
                sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
                diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
                    sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
            printf "\0"
        )
        if [[ "$count" == 0 ]]; then
            err "No change made to '$file'."
            return 1
        fi
        if [[ "$count" != 1 ]]; then
            err "compose file change request seems dubious and was refused:"
            e "$diff" | prefix " | " >&2
            return 1
        fi
        echo Applying: >&2
        e "$diff" | prefix " | " >&2
        patch <<<"$diff"
    ) || exit 1
}
export -f yaml:file:value-change
  170. type:is-mailcow() {
  171. mailcow:get-root >/dev/null ||
  172. mailcow:has-running-containers
  173. }
  174. type:is-compose() {
  175. compose:get-compose-yml >/dev/null &&
  176. compose:has-container-project-myc
  177. }
## Print the detected installation type by probing every ``type:is-*``
## predicate function in turn; the first one that succeeds names the type
## (e.g. "mailcow", "compose").  Session-cached via ``:cache:``.
vps:get-type() {
    :cache: scope=session
    local fn
    for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
        "$fn" && {
            echo "${fn#type:is-}"
            return 0
        }
    done
    return 1
}
decorator._mangle_fn vps:get-type
## Print the list of backup source paths configured in mirror-dir's
## /etc/mirror-dir/config.yml (key: default.sources).  Session-cached.
mirror-dir:sources() {
    :cache: scope=session
    if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
        err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
        return 1
    fi
}
decorator._mangle_fn mirror-dir:sources
## Ensure directory/glob $1 is listed in mirror-dir's backup sources,
## appending it to /etc/mirror-dir/config.yml when missing.
## ``Elt``/``Feedback`` are shlib UI helpers reporting the step's outcome.
mirror-dir:check-add() {
    local elt="$1" sources
    sources=$(mirror-dir:sources) || return 1
    ## exact (full-line) match against the current sources list
    if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
        info "Volume $elt already in sources"
    else
        Elt "Adding directory $elt"
        ## append a new list item right after the "sources:" key
        sed -i "/sources:/a\ - \"${elt}\"" \
            /etc/mirror-dir/config.yml
        Feedback || return 1
    fi
}
  210. mirror-dir:check-add-vol() {
  211. local elt="$1"
  212. mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
  213. }
  214. ## The first colon is to prevent auto-export of function from shlib
  215. : ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
  216. export BASH_BUG_5=1 && unset -f bash_bug_5
## Run a (multi-line) bash code string under the shlib ``Wrap`` UI helper.
## $1: display label, $2: code to run; remaining args are passed to it.
## NOTE(review): ``interpreter`` is not declared local and leaks to the
## global scope -- left as-is in case other code reads it.
wrap() {
    local label="$1" code="$2"
    shift 2
    export VERBOSE=1
    interpreter=/bin/bash
    if [ -n "$BASH_BUG_5" ]; then
        ## buggy bash versions can't feed the script via process
        ## substitution: write the code to a real temporary file instead
        (
            settmpdir tmpdir
            fname=${label##*/}
            e "$code" > "$tmpdir/$fname" &&
                chmod +x "$tmpdir/$fname" &&
                Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
        )
    else
        Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
    fi
}
## Verify that DNS for host $1 resolves to this machine's public IP.
## When the caller sets ``ignore_ping_check`` (non-empty), a mismatch only
## warns instead of failing.
ping_check() {
    #global ignore_ping_check
    local host="$1"
    ## first IPv4 address the resolver returns for $host
    ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
             head -n 1 | cut -f 1 -d " ") || return 1
    ## NOTE(review): discovers our public IP through the external service
    ## myip.kal.fr; ``ip`` and ``my_ip`` are not declared local.
    my_ip=$(curl -s myip.kal.fr)
    if [ "$ip" != "$my_ip" ]; then
        if [ -n "$ignore_ping_check" ]; then
            warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
        else
            err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
            return 1
        fi
    fi
}
  249. mailcow:install-backup() {
  250. local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
  251. ## find installation
  252. mailcow_root=$(mailcow:get-root) || {
  253. err "Couldn't find a valid mailcow root directory."
  254. return 1
  255. }
  256. ## check ok
  257. DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
  258. err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
  259. return 1
  260. }
  261. ping_check "$DOMAIN" || return 1
  262. MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
  263. err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
  264. return 1
  265. }
  266. if docker compose >/dev/null 2>&1; then
  267. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
  268. else
  269. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
  270. fi
  271. container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
  272. if [ -z "$container_id" ]; then
  273. err "Couldn't find docker container named '$MYSQL_CONTAINER'."
  274. return 1
  275. fi
  276. export KEY_BACKUP_ID="mailcow"
  277. export MYSQL_ROOT_PASSWORD
  278. export MYSQL_CONTAINER
  279. export BACKUP_SERVER
  280. export DOMAIN
  281. wrap "Install rsync-backup on host" "
  282. cd /srv/charm-store/rsync-backup
  283. bash ./hooks/install.d/60-install.sh
  284. " || return 1
  285. wrap "Mysql dump install" "
  286. cd /srv/charm-store/mariadb
  287. bash ./hooks/install.d/60-backup.sh
  288. " || return 1
  289. ## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
  290. for elt in "vmail{,-attachments-vol}" crypt redis rspamd postfix; do
  291. mirror-dir:check-add-vol "$elt" || return 1
  292. done
  293. mirror-dir:check-add "$mailcow_root" || return 1
  294. mirror-dir:check-add "/var/backups/mysql" || return 1
  295. mirror-dir:check-add "/etc" || return 1
  296. dest="$BACKUP_SERVER"
  297. dest="${dest%/*}"
  298. ssh_options=()
  299. if [[ "$dest" == *":"* ]]; then
  300. port="${dest##*:}"
  301. dest="${dest%%:*}"
  302. ssh_options=(-p "$port")
  303. else
  304. port=""
  305. dest="${dest%%:*}"
  306. fi
  307. info "You can run this following command from an host having admin access to $dest:"
  308. echo " (Or send it to a backup admin of $dest)" >&2
  309. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
  310. }
## Succeed when host $2 is served according to compose file $1: either a
## top-level service whose name looks like a domain equals $2, or one of
## its web-proxy / publish-dir relations declares it via ``domain`` or
## ``server-aliases``.
## NOTE(review): ``rel``, ``label`` and ``conf_relation`` are not declared
## local and leak to the caller's scope.
compose:has_domain() {
    local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
    ## iterate over top-level (service) entries of the compose file
    while read-0 name conf ; do
        name=$(e "$name" | shyaml get-value)
        ## service names shaped like "xxx.yyy" are treated as domains
        if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
            [ "$host" == "$name" ] && return 0
        fi
        rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
        for relation in web-proxy publish-dir; do
            relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
            while read-0 label conf_relation; do
                domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
                    [ "$host" == "$domain" ] && return 0
                }
                server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
                    [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
                }
            done < <(e "$relation_value" | shyaml -y key-values-0)
        done
    done < <(shyaml -y key-values-0 < "$compose_file")
    return 1
}
  333. compose:install-backup() {
  334. local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
  335. ## XXXvlab: far from perfect as it mimics and depends internal
  336. ## logic of current default way to get a domain in compose-core
  337. host=$(hostname)
  338. if ! compose:has_domain "$compose_file" "$host"; then
  339. if [ -n "$ignore_domain_check" ]; then
  340. warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
  341. else
  342. err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
  343. return 1
  344. fi
  345. fi
  346. ping_check "$host" || return 1
  347. if [ -e "/root/.ssh/rsync_rsa" ]; then
  348. warn "deleting private key in /root/.ssh/rsync_rsa, has we are not using it anymore."
  349. rm -fv /root/.ssh/rsync_rsa
  350. fi
  351. if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
  352. warn "deleting public key in /root/.ssh/rsync_rsa.pub, has we are not using it anymore."
  353. rm -fv /root/.ssh/rsync_rsa.pub
  354. fi
  355. if service_cfg=$(cat "$compose_file" |
  356. shyaml get-value -y "$service_name" 2>/dev/null); then
  357. info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
  358. "is already present in '$compose_file'."
  359. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  360. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  361. "entry in '$compose_file'."
  362. return 1
  363. }
  364. private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
  365. target=$(e "$cfg" | shyaml get-value target) || return 1
  366. if [ "$target" != "$BACKUP_SERVER" ]; then
  367. err "Existing backup target '$target' is different" \
  368. "from specified '$BACKUP_SERVER'"
  369. return 1
  370. fi
  371. else
  372. private_key=$(ssh:mk-private-key "$host" "$service_name")
  373. cat <<EOF >> "$compose_file"
  374. $service_name:
  375. options:
  376. ident: $host
  377. target: $BACKUP_SERVER
  378. private-key: |
  379. $(e "$private_key" | sed -r 's/^/ /g')
  380. EOF
  381. fi
  382. dest="$BACKUP_SERVER"
  383. dest="${dest%/*}"
  384. ssh_options=()
  385. if [[ "$dest" == *":"* ]]; then
  386. port="${dest##*:}"
  387. dest="${dest%%:*}"
  388. ssh_options=(-p "$port")
  389. else
  390. port=""
  391. dest="${dest%%:*}"
  392. fi
  393. info "You can run this following command from an host having admin access to $dest:"
  394. echo " (Or send it to a backup admin of $dest)" >&2
  395. ## We remove ending label (label will be added or not in the
  396. ## private key, and thus here, depending on the version of
  397. ## openssh-client)
  398. public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
  399. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
  400. }
  401. backup-action() {
  402. local action="$1"
  403. shift
  404. vps_type=$(vps:get-type) || {
  405. err "Failed to get type of installation."
  406. return 1
  407. }
  408. if ! fn.exists "${vps_type}:${action}"; then
  409. err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
  410. return 1
  411. fi
  412. "${vps_type}:${action}" "$@"
  413. }
  414. compose:get_default_backup_host_ident() {
  415. local service_name="$1" ## Optional
  416. local compose_file service_cfg cfg target
  417. compose_file=$(compose:get-compose-yml)
  418. service_name="${service_name:-rsync-backup}"
  419. if ! service_cfg=$(cat "$compose_file" |
  420. shyaml get-value -y "$service_name" 2>/dev/null); then
  421. err "No service named '$service_name' found in 'compose.yml'."
  422. return 1
  423. fi
  424. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  425. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  426. "entry in '$compose_file'."
  427. return 1
  428. }
  429. if ! target=$(e "$cfg" | shyaml get-value target); then
  430. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  431. "entry in '$compose_file'."
  432. fi
  433. if ! target=$(e "$cfg" | shyaml get-value target); then
  434. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  435. "entry in '$compose_file'."
  436. fi
  437. if ! ident=$(e "$cfg" | shyaml get-value ident); then
  438. err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  439. "entry in '$compose_file'."
  440. fi
  441. echo "$target $ident"
  442. }
  443. mailcow:get_default_backup_host_ident() {
  444. local content cron_line ident found dest cmd_line
  445. if ! [ -e "/etc/cron.d/mirror-dir" ]; then
  446. err "No '/etc/cron.d/mirror-dir' found."
  447. return 1
  448. fi
  449. content=$(cat /etc/cron.d/mirror-dir) || {
  450. err "Can't read '/etc/cron.d/mirror-dir'."
  451. return 1
  452. }
  453. if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
  454. err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
  455. return 1
  456. fi
  457. cron_line=${cron_line%|*}
  458. cmd_line=(${cron_line#*root})
  459. found=
  460. dest=
  461. for arg in "${cmd_line[@]}"; do
  462. [ -n "$found" ] && {
  463. dest="$arg"
  464. break
  465. }
  466. [ "$arg" == "-d" ] && {
  467. found=1
  468. }
  469. done
  470. if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
  471. err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  472. return 1
  473. fi
  474. if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
  475. ## unquoting, the eval should be safe because of previous check
  476. dest=$(eval e "$dest")
  477. fi
  478. if [ -z "$dest" ]; then
  479. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  480. return 1
  481. fi
  482. ## looking for ident
  483. found=
  484. ident=
  485. for arg in "${cmd_line[@]}"; do
  486. [ -n "$found" ] && {
  487. ident="$arg"
  488. break
  489. }
  490. [ "$arg" == "-h" ] && {
  491. found=1
  492. }
  493. done
  494. if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
  495. err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  496. return 1
  497. fi
  498. if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
  499. ## unquoting, the eval should be safe because of previous check
  500. ident=$(eval e "$ident")
  501. fi
  502. if [ -z "$ident" ]; then
  503. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  504. return 1
  505. fi
  506. echo "$dest $ident"
  507. }
  508. compose:service:containers() {
  509. local project="$1" service="$2"
  510. docker ps \
  511. --filter label="com.docker.compose.project=$project" \
  512. --filter label="compose.master-service=$service" \
  513. --format="{{.ID}}"
  514. }
  515. export -f compose:service:containers
## Print the single running container ID of service $2 in project $1.
## Fails when the service has zero or more than one running container.
## ``read-0a`` is a shlib helper -- presumably reads one record from the
## container-ID stream (TODO confirm exact framing).
compose:service:container_one() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        ## a second readable record means more than one container
        if read-0a _; then
            err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_one
## Print the first running container ID of service $2 in project $1.
## Fails when there is none; only warns when there are several.
compose:service:container_first() {
    local project="$1" service="$2" container_id
    {
        read-0a container_id || {
            err "service ${DARKYELLOW}$service${NORMAL} has no running container."
            return 1
        }
        if read-0a _; then
            warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
        fi
    } < <(compose:service:containers "$project" "$service")
    echo "$container_id"
}
export -f compose:service:container_first
## IDs of all currently running containers.  Session-cached via the
## ``:cache:`` decorator (do not restructure the body).
docker:running_containers() {
    :cache: scope=session
    docker ps --format="{{.ID}}"
}
decorator._mangle_fn docker:running_containers
export -f docker:running_containers
  551. compose:project:containers() {
  552. local project="$1" opts
  553. opts+=(--filter label="com.docker.compose.project=$project")
  554. docker ps "${opts[@]}" \
  555. --format="{{.ID}}"
  556. }
  557. export -f compose:project:containers
  558. compose:charm:containers() {
  559. local project="$1" charm="$2"
  560. docker ps \
  561. --filter label="com.docker.compose.project=$project" \
  562. --filter label="compose.charm=$charm" \
  563. --format="{{.ID}}"
  564. }
  565. export -f compose:charm:containers
## Print the single running container ID of charm $2 in project $1.
## Fails when the charm has zero or more than one running container.
compose:charm:container_one() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
            return 1
        }
        ## a second readable record means more than one container
        if read-0a _; then
            err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
            return 1
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_one
## Print the first running container ID of charm $2 in project $1,
## only warning on zero or multiple matches.
## NOTE(review): unlike compose:service:container_first, the no-container
## case does NOT return 1 -- an empty line is printed with status 0.
## TODO confirm callers rely on this lenient behavior.
compose:charm:container_first() {
    local project="$1" charm="$2" container_id
    {
        read-0a container_id || {
            warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
        }
        if read-0a _; then
            warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
        fi
    } < <(compose:charm:containers "$project" "$charm")
    echo "$container_id"
}
export -f compose:charm:container_first
## Print the URL of service $2 in project $1: taken from its web-proxy
## relation data when one exists, otherwise built as http://<container-ip>
## [:<first-exposed-tcp-port>].
compose:get_url() {
    local project_name="$1" service="$2" data_file network ip
    ## relation data directories matching this service (glob)
    data_dir=("/var/lib/compose/relations/${project_name}/${service}-"*"/web-proxy")
    if [ "${#data_dir[@]}" -gt 1 ]; then
        err "More than one web-proxy relation." \
            "Current 'vps' algorithm is insufficient" \
            "to figure out which relation is concerned"
        return 1
    fi
    data_file="${data_dir[0]}/data"
    if [ -d "${data_file%/*}" ]; then
        (
            set -o pipefail
            ## users can't cat directly the content
            docker run --rm \
                -v "${data_file%/*}":/tmp/dummy alpine \
                cat "/tmp/dummy/${data_file##*/}" |
                shyaml get-value url
        )
    else
        ## Assume there are no frontend relation here, the url is direct IP
        container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
        network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
        IFS=":" read -r network ip <<<"$network_ip"
        ## pick the first exposed tcp port, if any
        tcp_port=
        for port in $(docker:exposed_ports "$container_id"); do
            IFS="/" read port type <<<"$port"
            [ "$type" == "tcp" ] || continue
            tcp_port="$port"
            break
        done
        echo -n "http://$ip"
        [ -n "$tcp_port" ] && echo ":$tcp_port"
    fi || {
        err "Failed querying ${service} to frontend relation to get url."
        return 1
    }
}
export -f compose:get_url
  633. compose:container:service() {
  634. local container="$1" service
  635. if ! service=$(docker:container:label "$container" "compose.service"); then
  636. err "Failed to get service name from container ${container}."
  637. return 1
  638. fi
  639. if [ -z "$service" ]; then
  640. err "No service found for container ${container}."
  641. return 1
  642. fi
  643. echo "$service"
  644. }
  645. export -f compose:container:service
  646. compose:psql() {
  647. local project_name="$1" dbname="$2" container_id
  648. shift 2
  649. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  650. docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
  651. }
  652. export -f compose:psql
  653. compose:mongo() {
  654. local project_name="$1" dbname="$2" container_id
  655. container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
  656. docker exec -i "${container_id}" mongo --quiet "$dbname"
  657. }
  658. export -f compose:mongo
## Run the "pgm" tool from the project's postgres image, attached to the
## postgres container's network and authenticated through its pgpass file.
## $1: project name; remaining args are passed to pgm.
## Callers may pre-set the array ``pgm_docker_run_opts`` to add docker run
## options (used by postgres:dump / postgres:restore).
compose:pgm() {
    local project_name="$1" container_network_ip container_ip container_network
    shift
    container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
    service_name=$(compose:container:service "$container_id") || return 1
    image_id=$(docker:container:image "$container_id") || return 1
    container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
    IFS=":" read -r container_network container_ip <<<"$container_network_ip"
    ## host-side path of the postgres service's pgpass file
    pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
    ## NOTE(review): ``local name+=(...)`` both declares and appends; the
    ## array is local despite the unusual syntax.
    local final_pgm_docker_run_opts+=(
        -u 0 -e prefix_pg_local_command=" "
        --network "${container_network}"
        -e PGHOST="$container_ip"
        -e PGUSER=postgres
        -v "$pgpass:/root/.pgpass"
        "${pgm_docker_run_opts[@]}"
    )
    cmd=(docker run --rm \
        "${final_pgm_docker_run_opts[@]}" \
        "${image_id}" pgm "$@"
    )
    ## NOTE(review): the command line is echoed on *stdout* before running;
    ## callers capturing output receive it too -- TODO confirm intended.
    echo "${cmd[@]}"
    "${cmd[@]}"
}
export -f compose:pgm
  684. postgres:dump() {
  685. local project_name="$1" src="$2" dst="$3"
  686. (
  687. settmpdir PGM_TMP_LOCATION
  688. pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
  689. compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
  690. mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
  691. ) || return 1
  692. }
  693. export -f postgres:dump
  694. postgres:restore() {
  695. local project_name="$1" src="$2" dst="$3"
  696. full_src_path=$(readlink -e "$src") || exit 1
  697. (
  698. pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
  699. compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
  700. ) || return 1
  701. }
  702. export -f postgres:restore
  703. odoo:get_public_user_id() {
  704. local project_name="$1" dbname="$2"
  705. echo "select res_id from ir_model_data where model = 'res.users' and name = 'public_user';" |
  706. compose:psql "$project_name" "$dbname" -qAt
  707. }
  708. cyclos:set_root_url() {
  709. local project_name="$1" dbname="$2" url="$3"
  710. echo "UPDATE configurations SET root_url = '$url';" |
  711. compose:psql "$project_name" "$dbname" || {
  712. err "Failed to set cyclos url value in '$dbname' database."
  713. return 1
  714. }
  715. }
  716. export -f cyclos:set_root_url
  717. cyclos:unlock() {
  718. local project_name="$1" dbname="$2"
  719. echo "delete from database_lock;" |
  720. compose:psql "${project_name}" "${dbname}"
  721. }
  722. export -f cyclos:unlock
  723. rocketchat:drop-indexes() {
  724. local project_name="$1" dbname="$2"
  725. echo "db.users.dropIndexes()" |
  726. compose:mongo "${project_name}" "${dbname}"
  727. }
  728. export -f rocketchat:drop-indexes
  729. compose:project_name() {
  730. if [ -z "$PROJECT_NAME" ]; then
  731. PROJECT_NAME=$(compose --get-project-name) || {
  732. err "Couldn't get project name."
  733. return 1
  734. }
  735. if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
  736. err "Couldn't get project name, probably because 'compose.yml' wasn't found."
  737. echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
  738. echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
  739. return 1
  740. fi
  741. export PROJECT_NAME
  742. fi
  743. echo "$PROJECT_NAME"
  744. }
  745. export -f compose:project_name
## Print the ``docker run ...`` command programmed in the rsync-backup
## cron job inside the project's cron container.
## NOTE(review): ``project_name`` is not declared local; the cron line is
## split with ``eval`` -- acceptable only because the file is root-owned.
compose:get_cron_docker_cmd() {
    local cron_line cmd_line docker_cmd
    project_name=$(compose:project_name) || return 1
    if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
        err "Can't find cron_line in cron container."
        echo " Have you forgotten to run 'compose up' ?" >&2
        return 1
    fi
    ## strip the trailing log pipe / "2>&1", then the schedule/user prefix
    cron_line=${cron_line%|*}
    cron_line=${cron_line%"2>&1"*}
    cmd_line="${cron_line#*root}"
    eval "args=($cmd_line)"
    ## should be last argument
    docker_cmd=$(echo ${args[@]: -1})
    if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
        echo "docker command found should start with 'docker run'." >&2
        echo "Here's command:" >&2
        echo " $docker_cmd" >&2
        return 1
    fi
    e "$docker_cmd"
}
## Pull files back from the backup server into local path $4, running
## rsync inside the project's rsync-backup service image.
## $1: backup host ("host[:port]"), $2: backup ident, $3: remote subpath
## under /var/mirror/<ident>/, $4: local destination,
## $5: service name (default "rsync-backup").
## Honors DRY_RUN (adds rsync -n).
compose:recover-target() {
    local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
    project_name=$(compose:project_name) || return 1
    docker_image="${project_name}_${service_name}"
    ## build the service image on the fly when it is missing
    if ! docker_has_image "$docker_image"; then
        compose build "${service_name}" || {
            err "Couldn't find nor build image for service '$service_name'."
            return 1
        }
    fi
    dst="${dst%/}" ## remove final slash
    ssh_options=(-o StrictHostKeyChecking=no)
    ## split an optional ":port" out of the backup host spec
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi
    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    ## the destination's parent is mounted in the container; rsync writes
    ## into /mnt/dest/<basename>
    cmd=(
        docker run --rm --entrypoint rsync \
            -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
            -v "${dst%/*}":/mnt/dest \
            "$docker_image" \
            "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
## Same recovery as compose:recover-target, but running rsync directly on
## the host (mailcow installs have no rsync-backup container image).
## $1: backup host ("host[:port]"), $2: ident, $3: remote subpath,
## $4: local destination.  Honors DRY_RUN (adds rsync -n).
mailcow:recover-target() {
    local backup_host="$1" ident="$2" src="$3" dst="$4"
    dst="${dst%/}" ## remove final slash
    ssh_options=(-o StrictHostKeyChecking=no)
    ## split an optional ":port" out of the backup host spec
    if [[ "$backup_host" == *":"* ]]; then
        port="${backup_host##*:}"
        backup_host="${backup_host%%:*}"
        ssh_options+=(-p "$port")
    else
        port=""
        backup_host="${backup_host%%:*}"
    fi
    rsync_opts=(
        -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
        -azvArH --delete --delete-excluded
        --partial --partial-dir .rsync-partial
        --numeric-ids
    )
    if [ "$DRY_RUN" ]; then
        rsync_opts+=("-n")
    fi
    cmd=(
        rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
    )
    echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
    "${cmd[@]}"
}
## Print the installed nextcloud version as read from the deployed
## version.php.  Relies on the global ``nextcloud_service`` to locate the
## datastore path.
## NOTE(review): errors use ``exit`` (not ``return``), aborting the whole
## script -- TODO confirm this is intended.
nextcloud:src:version() {
    local version
    if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
        err "Can't find version.php file to get last version installed."
        exit 1
    fi
    ## extract the value of the single-quoted $OC_VersionString assignment
    version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
    if [ -z "$version" ]; then
        err "Can't figure out version from version.php content."
        exit 1
    fi
    echo "$version"
}
## Detect a container whose daemon record is stuck: ``docker inspect``
## hanging past 5s (timeout -> errlvl 124).  In that case locate the
## docker-containerd-shim process for the container, kill it and restart
## the container.
## Returns the original inspect errorlevel (still 124 after a fix attempt).
container:health:check-fix:container-aliveness() {
    local container_id="$1"
    timeout 5s docker inspect "$container_id" >/dev/null 2>&1
    errlvl=$?
    if [ "$errlvl" == 124 ]; then
        service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
        container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
        ## pid of the shim process holding this container
        pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim |
                  grep "/$container_id" |
                  sed -r 's/^ *//g' |
                  cut -f 1 -d " ")
        if [ -z "$pid" ]; then
            err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command. Can't find its PID neither."
            return 1
        fi
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command (pid: $pid)."
        Wrap -d "kill pid $pid and restart" <<EOF
kill "$pid"
sleep 2
docker restart "$container_id"
EOF
    fi
    return $errlvl
}
## Health check: detect the docker "no matching entries in passwd file"
## bug ('docker exec' fails with errorlevel 126) and work around it by
## restarting the container twice.
## $1: container id.
## Returns: 0 when 'docker exec' works, otherwise its errorlevel.
container:health:check-fix:no-matching-entries() {
    local container_id="$1"
    out=$(docker exec "$container_id" echo 2>&1)
    errlvl=$?
    [ "$errlvl" == 0 ] && return 0
    service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
    container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
    if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
        echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
        ## Restarting twice is the workaround applied here for this bug.
        Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
docker restart "$container_id"
sleep 2
docker restart "$container_id"
EOF
        return $errlvl
    fi
    ## Unrecognized failure: report it, but leave the container untouched.
    warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
    echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
    echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
    echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
    return $errlvl
}
  893. docker:api() {
  894. local endpoint="$1"
  895. curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
  896. }
  897. docker:containers:id() {
  898. docker:api /containers/json | jq -r ".[] | .Id"
  899. }
  900. docker:containers:names() {
  901. docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
  902. }
## Fetch a single docker stats snapshot (JSON, no streaming) for the
## given container.
## $1: container name or id.
## NOTE(review): 'container' is assigned without 'local' and thus leaks
## into the caller's scope -- confirm nothing relies on that before
## tightening.
docker:container:stats() {
    container="$1"
    docker:api "/containers/$container/stats?stream=false"
}
## Collect one docker stats snapshot for the given containers (fetched in
## parallel) and sum the counters per "project/service".
## $@: container names or ids.
## Output, one line per project/service:
##   'project/service ts mem_usage mem_inactive_file mem_used mem_limit
##    rx_bytes rx_packets rx_errors rx_dropped tx_bytes tx_packets
##    tx_errors tx_dropped'
docker:containers:stats() {
    :cache: scope=session  ## decorator annotation, applied by decorator._mangle_fn below
    local jobs='' line container id_names sha names name data service project
    local DC="com.docker.compose"
    local PSF_values=(
        ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
    )
    local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
    id_names=$(docker ps -a --format="$PSF") || return 1
    ## Create a docker container table from name/sha to service, project, image_name
    declare -A resolve
    while read-0a line; do
        sha=${line%% *}; line=${line#* }
        names=${line%% *}; line=${line#* }
        names=(${names//,/ })
        for name in "${names[@]}"; do
            resolve["$name"]="$line"
        done
        resolve["$sha"]="$line"
    done < <(printf "%s\n" "$id_names")
    ## Read the per-container stat lines and aggregate them per
    ## project/service key.
    declare -A data
    while read-0a line; do
        name=${line%% *}; line=${line#* }
        ts=${line%% *}; line=${line#* }
        resolved="${resolve["$name"]}"
        project=${resolved%% *}; resolved=${resolved#* }
        service=${resolved%% *}; resolved=${resolved#* }
        image_name="$resolved"
        if [ -z "$service" ]; then
            ## Container not managed by compose: group it under project
            ## '@', using a service name derived from its image tag.
            project="@"
            service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
            service=${service//\//_}
        fi
        if [ -n "${data["$project/$service"]}" ]; then
            ## Existing entry: drop its stored timestamp (first field)
            ## and add the new counters element-wise.
            previous=(${data["$project/$service"]})
            previous=(${previous[@]:1})
            current=($line)
            sum=()
            i=0; max=${#previous[@]}
            while (( i < max )); do
                sum+=($((${previous[$i]} + ${current[$i]})))
                ((i++))
            done
            data["$project/$service"]="$ts ${sum[*]}"
        else
            data["$project/$service"]="$ts $line"
        fi
    done < <(
        ## One background job per container; each prints a single
        ## space-separated line: name ts mem... net...
        for container in "$@"; do
            (
                docker:container:stats "${container}" |
                    jq -r '
            (.name | ltrimstr("/"))
            + " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
            + " " + (.memory_stats.usage | tostring)
            + " " + (.memory_stats.stats.inactive_file | tostring)
            + " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
            + " " + (.memory_stats.limit | tostring)
            + " " + (.networks.eth0.rx_bytes | tostring)
            + " " + (.networks.eth0.rx_packets | tostring)
            + " " + (.networks.eth0.rx_errors | tostring)
            + " " + (.networks.eth0.rx_dropped | tostring)
            + " " + (.networks.eth0.tx_bytes | tostring)
            + " " + (.networks.eth0.tx_packets | tostring)
            + " " + (.networks.eth0.tx_errors | tostring)
            + " " + (.networks.eth0.tx_dropped | tostring)
            '
            ) &
            jobs=1
        done
        [ -n "$jobs" ] && wait
    )
    for label in "${!data[@]}"; do
        echo "$label ${data[$label]}"
    done
}
  983. decorator._mangle_fn docker:containers:stats
  984. export -f docker:containers:stats
  985. col:normalize:size() {
  986. local alignment=$1
  987. awk -v alignment="$alignment" '{
  988. # Store the entire line in the lines array.
  989. lines[NR] = $0;
  990. # Split the line into fields.
  991. split($0, fields);
  992. # Update max for each field.
  993. for (i = 1; i <= length(fields); i++) {
  994. if (length(fields[i]) > max[i]) {
  995. max[i] = length(fields[i]);
  996. }
  997. }
  998. }
  999. END {
  1000. # Print lines with fields padded to max.
  1001. for (i = 1; i <= NR; i++) {
  1002. split(lines[i], fields);
  1003. line = "";
  1004. for (j = 1; j <= length(fields); j++) {
  1005. # Get alignment for the current field.
  1006. align = substr(alignment, j, 1);
  1007. if (align != "+") {
  1008. align = "-"; # Default to left alignment if not "+".
  1009. }
  1010. line = line sprintf("%" align max[j] "s ", fields[j]);
  1011. }
  1012. print line;
  1013. }
  1014. }'
  1015. }
  1016. rrd:create() {
  1017. local prefix="$1"
  1018. shift
  1019. local label="$1" step="300" src_def
  1020. shift
  1021. if [ -z "$VAR_DIR" ]; then
  1022. err "Unset \$VAR_DIR, can't create rrd graph"
  1023. return 1
  1024. fi
  1025. mkdir -p "$VAR_DIR"
  1026. if ! [ -d "$VAR_DIR" ]; then
  1027. err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
  1028. return 1
  1029. fi
  1030. if ! type -p rrdtool >/dev/null 2>&1; then
  1031. apt-get install rrdtool -y --force-yes </dev/null
  1032. if ! type -p rrdtool 2>/dev/null 2>&1; then
  1033. err "Couldn't find nor install 'rrdtool'."
  1034. return 1
  1035. fi
  1036. fi
  1037. local RRD_PATH="$VAR_DIR/rrd"
  1038. local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
  1039. mkdir -p "${RRD_FILE%/*}"
  1040. if [ -f "$RRD_FILE" ]; then
  1041. err "File '$RRD_FILE' already exists, use a different label."
  1042. return 1
  1043. fi
  1044. local rrd_ds_opts=()
  1045. for src_def in "$@"; do
  1046. IFS=":" read -r name type min max rra_types <<<"$src_def"
  1047. rra_types=${rra_types:-average,max,min}
  1048. rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
  1049. done
  1050. local step=120
  1051. local times=( ## with steps 120 is 2mn datapoint
  1052. 2m:1w
  1053. 6m:3w
  1054. 30m:12w
  1055. 3h:1y
  1056. 1d:10y
  1057. 1w:2080w
  1058. )
  1059. rrd_rra_opts=()
  1060. for time in "${times[@]}"; do
  1061. rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
  1062. done
  1063. cmd=(
  1064. rrdtool create "$RRD_FILE" \
  1065. --step "$step" \
  1066. "${rrd_ds_opts[@]}" \
  1067. "${rrd_rra_opts[@]}"
  1068. )
  1069. "${cmd[@]}" || {
  1070. err "Failed command: ${cmd[@]}"
  1071. return 1
  1072. }
  1073. }
## Feed datapoint lines (stdin) into RRD files, creating missing files on
## the fly through rrd:create.
##
## $1: prefix (sub-directory under "$VAR_DIR/rrd")
## $@: per-rrd specs 'name|col:create_def[,col:create_def...]' where
##     'col' indexes the whitespace-separated fields of the input line
##     (0: label, 1: timestamp, 2..: values) and 'create_def' is a
##     data-source definition handed to rrd:create.
## stdin: lines of the form 'label ts value...'
rrd:update() {
    local prefix="$1"
    shift
    while read-0a data; do
        [ -z "$data" ] && continue
        ## Split the line on spaces by mapping them to '~' separators.
        IFS="~" read -ra data <<<"${data// /\~}"
        label="${data[0]}"
        ts="${data[1]}"
        for arg in "$@"; do
            IFS="|" read -r name arg <<<"$arg"
            rrd_label="${label}/${name}"
            rrd_create_opt=()
            rrd_update_opt="$ts"
            ## Build 'ts:v1:v2:...' plus the matching creation options.
            for col_def in ${arg//,/ }; do
                col=${col_def%%:*}; create_def=${col_def#*:}
                rrd_update_opt="${rrd_update_opt}:${data[$col]}"
                rrd_create_opt+=("$create_def")
            done
            local RRD_ROOT_PATH="$VAR_DIR/rrd"
            local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
            local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
            if ! [ -f "$RRD_FILE" ]; then
                info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
                if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
                    err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
                    return 1
                fi
            fi
            rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
                err "update failed with options: '$rrd_update_opt'"
                return 1
            }
        done
    done
}
  1109. [ "$SOURCED" ] && return 0
  1110. ##
  1111. ## Command line processing
  1112. ##
  1113. cmdline.spec.gnu
  1114. cmdline.spec.reporting
cmdline.spec.gnu install
## 'vps install': command group only -- the actual work is done by the
## subcommands (backup, compose-backup, mailcow-backup, ...).
cmdline.spec::cmd:install:run() {
    :
}
cmdline.spec.gnu get-type
## 'vps get-type': print the detected type of this VPS installation
## (as returned by vps:get-type).
cmdline.spec::cmd:get-type:run() {
    vps:get-type
}
## 'vps install backup': dispatch the backup installation to the
## type-specific subcommand '<type>-backup', forwarding the relevant
## --ignore-* flags.
cmdline.spec:install:cmd:backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optfla: --ignore-domain-check \
               "Allow to bypass the domain check in
                compose file (only used in compose
                installation)."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    local vps_type
    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "${vps_type}:install-backup"; then
        err "type '${vps_type}' has no backup installation implemented yet."
        return 1
    fi
    opts=()
    [ "$opt_ignore_ping_check" ] &&
        opts+=("--ignore-ping-check")
    ## --ignore-domain-check only makes sense for compose installations.
    if [ "$vps_type" == "compose" ]; then
        [ "$opt_ignore_domain_check" ] &&
            opts+=("--ignore-domain-check")
    fi
    ## Delegates to e.g. 'cmdline.spec:install:cmd:compose-backup:run'.
    "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
}
## Compose service name checked for the backup key by default.
DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
cmdline.spec.gnu compose-backup
## 'vps install compose-backup': install backup for a compose-based VPS,
## delegating to compose:install-backup after sanity checks.
cmdline.spec:install:cmd:compose-backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optval: --service-name,-s "YAML service name in compose
                                  file to check for existence of key.
                                  Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
    : :optval: --compose-file,-f "Compose file location. Defaults to
                                  the value of '\$DEFAULT_COMPOSE_FILE'"
    : :optfla: --ignore-domain-check \
               "Allow to bypass the domain check in
                compose file."
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    local service_name compose_file
    ## local.conf may define DEFAULT_COMPOSE_FILE among other settings.
    [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
    compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
    service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
    if ! [ -e "$compose_file" ]; then
        err "Compose file not found in '$compose_file'."
        return 1
    fi
    compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
                           "$opt_ignore_ping_check" "$opt_ignore_domain_check"
}
## 'vps install mailcow-backup': install backup for a mailcow VPS.
## NOTE(review): unlike sibling subcommands there is no
## 'cmdline.spec.gnu mailcow-backup' declaration above -- confirm
## whether that is intended.
cmdline.spec:install:cmd:mailcow-backup:run() {
    : :posarg: BACKUP_SERVER 'Target backup server'
    : :optfla: --ignore-ping-check "Allow to bypass the ping check of host."
    "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
}
cmdline.spec.gnu backup
## 'vps backup': run the backup process matching the detected VPS type
## (dispatches to 'cmdline.spec:backup:cmd:<type>:run').
cmdline.spec::cmd:backup:run() {
    local vps_type
    vps_type=$(vps:get-type) || {
        err "Failed to get type of installation."
        return 1
    }
    if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
        err "type '${vps_type}' has no backup process implemented yet."
        return 1
    fi
    "cmdline.spec:backup:cmd:${vps_type}:run"
}
## 'vps backup' (mailcow): replay interactively the backup commands
## installed in /etc/cron.d (mysql-backup then mirror-dir), with stdout
## and stderr prefixed differently for readability.
cmdline.spec:backup:cmd:mailcow:run() {
    local cmd_line cron_line cmd
    for f in mysql-backup mirror-dir; do
        [ -e "/etc/cron.d/$f" ] || {
            err "Can't find '/etc/cron.d/$f'."
            echo " Have you forgotten to run 'vps install backup BACKUP_HOST' ?" >&2
            return 1
        }
        ## Pick the active cron line (non-comment, with time fields).
        if ! cron_line=$(cat "/etc/cron.d/$f" |
                             grep -v "^#" | grep "\* \* \*"); then
            err "Can't find cron_line in '/etc/cron.d/$f'." \
                "Have you modified it ?"
            return 1
        fi
        cron_line=${cron_line%|*}      ## drop the trailing '| ...' part
        cmd_line=(${cron_line#*root})  ## drop schedule and 'root' user field
        if [ "$f" == "mirror-dir" ]; then
            ## Remove '-q' so this interactive run is verbose.
            cmd=()
            for arg in "${cmd_line[@]}"; do
                [ "$arg" != "-q" ] && cmd+=("$arg")
            done
        else
            cmd=("${cmd_line[@]}")
        fi
        code="${cmd[*]}"
        echo "${WHITE}Launching:${NORMAL} ${code}"
        {
            {
                (
                    ## Some commands are using colors that are already
                    ## set by this current program and will trickle
                    ## down unwantedly
                    ansi_color no
                    eval "${code}"
                ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
                set_errlvl "${PIPESTATUS[0]}"
            ## The '3>&1 1>&2 2>&3' dance swaps stdout and stderr so that
            ## each stream gets its own prefix (gray '|' vs red '!').
            } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
            set_errlvl "${PIPESTATUS[0]}"
        } 3>&1 1>&2 2>&3
        if [ "$?" != "0" ]; then
            err "Failed."
            return 1
        fi
    done
    info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
  1236. set_errlvl() { return "${1:-1}"; }
## 'vps backup' (compose): run the cron container's backup command via
## 'docker exec', with stdout and stderr prefixed differently.
cmdline.spec:backup:cmd:compose:run() {
    local cron_line args
    project_name=$(compose:project_name) || return 1
    docker_cmd=$(compose:get_cron_docker_cmd) || return 1
    echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
    {
        {
            eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
            set_errlvl "${PIPESTATUS[0]}"
        ## '3>&1 1>&2 2>&3' swaps stdout/stderr so stderr gets the red
        ## '!' prefix while stdout keeps the gray '|'.
        } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
        set_errlvl "${PIPESTATUS[0]}"
    } 3>&1 1>&2 2>&3
    if [ "$?" != "0" ]; then
        err "Failed."
        return 1
    fi
    info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
}
cmdline.spec.gnu recover-target
## 'vps recover-target': fetch back a directory from the backup server
## into a local directory (optionally as a dry run) via backup-action.
cmdline.spec::cmd:recover-target:run() {
    : :posarg: BACKUP_DIR 'Source directory on backup side'
    : :posarg: HOST_DIR 'Target directory on host side'
    : :optval: --backup-host,-B "The backup host"
    : :optfla: --dry-run,-n "Don't do anything, instead tell what it
                            would do."
    ## if no backup host take the one by default
    backup_host="$opt_backup_host"
    if [ -z "$backup_host" ]; then
        backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
        read -r backup_host ident <<<"$backup_host_ident"
    fi
    if [[ "$BACKUP_DIR" == /* ]]; then
        err "BACKUP_DIR must be a relative path from the root of your backup."
        return 1
    fi
    REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
        err "Can't find HOST_DIR '$HOST_DIR'."
        return 1
    }
    export DRY_RUN="${opt_dry_run}"
    backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
}
cmdline.spec.gnu odoo
## 'vps odoo': command group for odoo service management (see the
## restart/restore/dump/drop/... subcommands below).
cmdline.spec::cmd:odoo:run() {
    :
}
  1283. cmdline.spec.gnu restart
  1284. cmdline.spec:odoo:cmd:restart:run() {
  1285. : :optval: --service,-s "The service (defaults to 'odoo')"
  1286. local out odoo_service
  1287. odoo_service="${opt_service:-odoo}"
  1288. project_name=$(compose:project_name) || return 1
  1289. if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
  1290. if [[ "$out" == *"no matching entries in passwd file" ]]; then
  1291. warn "Catched docker bug. Restarting once more."
  1292. if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
  1293. err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
  1294. echo " output:" >&2
  1295. echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
  1296. exit 1
  1297. fi
  1298. else
  1299. err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
  1300. exit 1
  1301. fi
  1302. fi
  1303. info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
  1304. }
cmdline.spec.gnu restore
## 'vps odoo restore': restore an odoo zip dump (local path or http(s)
## url) into the target database, then restart odoo.
cmdline.spec:odoo:cmd:restore:run() {
    : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
                                  (can be a local file or an url)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    : :optval: --database,-d 'Target database (default if not specified)'
    local out
    odoo_service="${opt_service:-odoo}"
    ## Remote dump: download it to a temporary file first.
    if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
           [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
        settmpdir ZIP_TMP_LOCATION
        tmp_location="$ZIP_TMP_LOCATION/dump.zip"
        curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
            err "Couldn't get '$ZIP_DUMP_LOCATION'."
            exit 1
        }
        ## Zip files start with the magic bytes 'PK'.
        if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
            err "Download doesn't seem to be a zip file."
            dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
            exit 1
        fi
        info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
        echo " in '$tmp_location'." >&2
        ZIP_DUMP_LOCATION="$tmp_location"
    fi
    [ -e "$ZIP_DUMP_LOCATION" ] || {
        err "No file '$ZIP_DUMP_LOCATION' found." >&2
        exit 1
    }
    #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database left unquoted: when empty it expands to no argument.
    compose --no-hooks drop "$odoo_service" $opt_database || {
        err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}."
        exit 1
    }
    compose --no-hooks load "$odoo_service" $opt_database < "$ZIP_DUMP_LOCATION" || {
        err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
        exit 1
    }
    info "Successfully restored ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
    ## Restart odoo, ensure there is no bugs lingering on it.
    cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
}
cmdline.spec.gnu dump
## 'vps odoo dump': dump an odoo database to a zip file.
cmdline.spec:odoo:cmd:dump:run() {
    : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
    : :optval: --database,-d 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    odoo_service="${opt_service:-odoo}"
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database left unquoted: when empty it expands to no argument.
    compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
        err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
        exit 1
    }
    info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
}
cmdline.spec.gnu drop
## 'vps odoo drop': drop an odoo database.
cmdline.spec:odoo:cmd:drop:run() {
    : :optval: --database,-d 'Target database (default if not specified)'
    : :optval: --service,-s "The service (defaults to 'odoo')"
    odoo_service="${opt_service:-odoo}"
    msg_dbname=default
    [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
    ## $opt_database left unquoted: when empty it expands to no argument.
    compose --no-hooks drop "$odoo_service" $opt_database || {
        err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
        exit 1
    }
    info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
}
cmdline.spec.gnu set-cyclos-url
## 'vps odoo set-cyclos-url': update odoo's res_company record so its
## cyclos_server_url points to the url derived from the compose
## configuration for the cyclos service.
cmdline.spec:odoo:cmd:set-cyclos-url:run() {
    : :optval: --database,-d "Target database ('odoo' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    local URL
    dbname=${opt_database:-odoo}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    ## Unquoted heredoc: $URL/$project_name/$dbname are expanded now, the
    ## resulting snippet is executed by Wrap.
    Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
    compose:psql "$project_name" "$dbname" || {
    err "Failed to set cyclos url value in '$dbname' database."
    exit 1
}
EOF
}
cmdline.spec.gnu fix-sso
## 'vps odoo fix-sso': set the website record's user_id to the public
## user's id in the given database.
cmdline.spec:odoo:cmd:fix-sso:run() {
    : :optval: --database,-d "Target database ('odoo' if not specified)"
    local public_user_id project_name dbname
    dbname=${opt_database:-odoo}
    project_name=$(compose:project_name) || exit 1
    public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
    ## Unquoted heredoc: variables are expanded now, the resulting SQL
    ## snippet is executed by Wrap.
    Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
echo "UPDATE website SET user_id = $public_user_id;" |
    compose:psql "$project_name" "$dbname" || {
    err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
    exit 1
}
EOF
}
cmdline.spec.gnu cyclos
## 'vps cyclos': command group for cyclos service management.
cmdline.spec::cmd:cyclos:run() {
    :
}
## 'vps cyclos dump': stop cyclos, dump its postgres database to a gz
## file, then start cyclos again.
## NOTE(review): the posarg help text says 'odoo dump gz file' although
## this is the cyclos dump -- probable copy/paste leftover.
cmdline.spec:cyclos:cmd:dump:run() {
    : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
    ## The service is stopped for the duration of the dump.
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    Wrap -d "Dump postgres database '${cyclos_database}'." -- \
         postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu restore
## 'vps cyclos restore': restore a cyclos gzipped dump (local path or
## http(s) url): stop cyclos, restore postgres, remove any database
## lock, set the root url, start cyclos again.
cmdline.spec:cyclos:cmd:restore:run() {
    : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
                                 (can be a local file or an url)'
    : :optval: --service,-s "The service (defaults to 'cyclos')"
    : :optval: --database,-d 'Target database (default if not specified)'
    local out
    cyclos_service="${opt_service:-cyclos}"
    cyclos_database="${opt_database:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
    container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
    ## Remote dump: download it and check the gzip magic bytes (1f8b).
    ## Escaped '\$' parts run at Wrap time, unescaped ones expand now.
    if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
           [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
        settmpdir GZ_TMP_LOCATION
        tmp_location="$GZ_TMP_LOCATION/dump.gz"
        Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
## Note that curll version before 7.76.0 do not have
curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
    > "$tmp_location" || {
    echo "Error fetching ressource. Is url correct ?" >&2
    exit 1
}
if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
         hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
    err "Download doesn't seem to be a gzip file."
    dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
    exit 1
fi
EOF
        GZ_DUMP_LOCATION="$tmp_location"
    fi
    [ -e "$GZ_DUMP_LOCATION" ] || {
        err "No file '$GZ_DUMP_LOCATION' found." >&2
        exit 1
    }
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    ## XXXvlab: making the assumption that the postgres username should
    ## be the same as the cyclos service selected (which is the default,
    ## but not always the case).
    Wrap -d "restore postgres database '${cyclos_database}'." -- \
         postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
    ## ensure that the database is not locked
    Wrap -d "check and remove database lock if any" -- \
         cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu set-root-url
## 'vps cyclos set-root-url': recompute the service url from the compose
## configuration and store it as cyclos' root url (the service is
## stopped during the update, then started again).
cmdline.spec:cyclos:cmd:set-root-url:run() {
    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    local URL
    cyclos_database=${opt_database:-cyclos}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    Wrap -d "set root url to '$url'" -- \
         cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu unlock
## 'vps cyclos unlock': remove any cyclos database lock (service stopped
## during the operation, then started again).
cmdline.spec:cyclos:cmd:unlock:run() {
    : :optval: --database,-d "Target database ('cyclos' if not specified)"
    : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
    local URL
    cyclos_database=${opt_database:-cyclos}
    cyclos_service="${opt_service:-cyclos}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
    Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    Wrap -d "check and remove database lock if any" -- \
         cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
    Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
}
cmdline.spec.gnu rocketchat
## 'vps rocketchat': command group for rocketchat service management.
cmdline.spec::cmd:rocketchat:run() {
    :
}
cmdline.spec.gnu drop-indexes
## 'vps rocketchat drop-indexes': stop rocketchat, drop its database
## indexes, then start the container again regardless of the outcome.
cmdline.spec:rocketchat:cmd:drop-indexes:run() {
    : :optval: --database,-d "Target database ('rocketchat' if not specified)"
    : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"
    local URL
    rocketchat_database=${opt_database:-rocketchat}
    rocketchat_service="${opt_service:-rocketchat}"
    project_name=$(compose:project_name) || exit 1
    container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1
    Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
         docker stop "$container_id" || exit 1
    errlvl=0
    ## Remember a drop failure but still restart the container below.
    Wrap -d "drop indexes" -- \
         rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
        errlvl=1
        errmsg="Failed to drop indexes"
    }
    Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
         docker start "${container_id}" || exit 1
    if [ "$errlvl" != 0 ]; then
        err "$errmsg"
    fi
    exit "$errlvl"
}
cmdline.spec.gnu nextcloud
## 'vps nextcloud': command group for nextcloud service management.
cmdline.spec::cmd:nextcloud:run() {
    :
}
cmdline.spec.gnu upgrade
## 'vps nextcloud upgrade': stop the nextcloud containers, run the
## compose 'upgrade' action towards TARGET_VERSION, sync compose.yml's
## image tag with the code version actually installed on disk, and run a
## final 'compose up' on success.  Exits with the upgrade's errorlevel.
cmdline.spec:nextcloud:cmd:upgrade:run() {
    : :posarg: [TARGET_VERSION] "Target version to migrate to"
    : :optval: --service,-s "The nexcloud service name (defaults to 'nextcloud')"
    local URL
    nextcloud_service="${opt_service:-nextcloud}"
    project_name=$(compose:project_name) || exit 1
    containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1
    container_stopped=()
    if [ -n "$containers" ]; then
        for container in $containers; do
            Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
                 docker stop "$container" || {
                err "Failed to stop container '$container'."
                exit 1
            }
            container_stopped+=("$container")
        done
    fi
    before_version=$(nextcloud:src:version) || exit 1
    ## -q to remove the display of ``compose`` related information
    ## like relation resolution.
    ## --no-hint to remove the final hint about modifying your
    ## ``compose.yml``.
    compose -q upgrade "$nextcloud_service" --no-hint "$TARGET_VERSION"
    errlvl="$?"
    ## NOTE(review): unlike before_version above, after_version is not
    ## checked for failure -- confirm this is intended.
    after_version=$(nextcloud:src:version)
    ## Even on a failed upgrade, record any version step actually made.
    if [ "$after_version" != "$before_version" ]; then
        desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
        desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
        Wrap -d "$desc" -- \
             compose:file:value-change \
             "${nextcloud_service}.docker-compose.image" \
             "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
    fi
    if [ "$errlvl" == 0 ]; then
        echo "${WHITE}Launching final compose${NORMAL}"
        compose up || exit 1
    fi
    exit "$errlvl"
}
cmdline.spec.gnu check-fix
## 'vps check-fix': run the registered 'container:health:check-fix:*'
## checks (all of them, or the subset given via --check) on the current
## compose project's containers, optionally restricted to some services.
## Returns 1 when no matching container was found.
cmdline.spec::cmd:check-fix:run() {
    : :posarg: [SERVICES...] "Optional service to check"
    : :optval: --check,-c "Specify a check or a list of checks separated by commas"
    : :optfla: --silent,-s "Don't ouput anything if everything goes well"
    local project_name service_name containers container check
    ## Discover available checks by introspecting defined function names.
    all_checks=$(declare -F |
                     egrep '^declare -fx? container:health:check-fix:[^ ]+$' |
                     cut -f 4 -d ":")
    checks=(${opt_check//,/ })
    for check in "${checks[@]}"; do
        fn.exists container:health:check-fix:$check || {
            err "check '$check' not found."
            return 1
        }
    done
    ## No explicit --check: run every discovered check.
    if [ "${#checks[*]}" == 0 ]; then
        checks=($all_checks)
    fi
    ## XXXvlab: could make it parallel
    project_name=$(compose:project_name) || exit 1
    containers=($(compose:project:containers "${project_name}")) || exit 1
    found=
    for container in "${containers[@]}"; do
        service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
        ## Restrict to the services given as positional arguments, if any.
        if [ "${#SERVICES[@]}" -gt 0 ]; then
            [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
        fi
        found=1
        one_bad=
        for check in "${checks[@]}"; do
            if ! container:health:check-fix:"$check" "$container"; then
                one_bad=1
            fi
        done
        if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
            Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
            Feedback
        fi
    done
    if [ -z "$found" ]; then
        if [ -z "$opt_silent" ]; then
            if [ "${#SERVICES[@]}" -gt 0 ]; then
                warn "No container for given services found in current project '$project_name'."
            else
                warn "No container found for current project '$project_name'."
            fi
        fi
        return 1
    fi
}
  1635. awk:require() {
  1636. local require_at_least="$1" version already_installed
  1637. while true; do
  1638. if ! version=$(awk --version 2>/dev/null); then
  1639. version=""
  1640. else
  1641. version=${version%%,*}
  1642. version=${version##* }
  1643. fi
  1644. if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
  1645. if [ -z "$already_installed" ]; then
  1646. if [ -z "$version" ]; then
  1647. info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
  1648. else
  1649. info "Found gawk version '$version'. Updating 'gawk'..."
  1650. fi
  1651. apt-get install gawk -y </dev/null || {
  1652. err "Failed to install 'gawk'."
  1653. return 1
  1654. }
  1655. already_installed=true
  1656. else
  1657. if [ -z "$version" ]; then
  1658. err "No 'gawk' available even after having installed one"
  1659. else
  1660. err "'gawk' version '$version' is lower than required" \
  1661. "'$require_at_least' even after updating 'gawk'."
  1662. fi
  1663. return 1
  1664. fi
  1665. continue
  1666. fi
  1667. return 0
  1668. done
  1669. }
  1670. cmdline.spec.gnu stats
  1671. cmdline.spec::cmd:stats:run() {
  1672. : :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
  1673. : :optfla: --silent,-s "Shorthand for '--format silent'"
  1674. : :optval: --resource,-r 'resource(s) separated with a comma'
  1675. local project_name service_name containers container check
  1676. if [[ -n "${opt_silent}" ]]; then
  1677. if [[ -n "${opt_format}" ]]; then
  1678. err "'--silent' conflict with option '--format'."
  1679. return 1
  1680. fi
  1681. opt_format=s
  1682. fi
  1683. opt_format="${opt_format:-pretty}"
  1684. case "${opt_format}" in
  1685. raw|r)
  1686. opt_format="raw"
  1687. :
  1688. ;;
  1689. silent|s)
  1690. opt_format="silent"
  1691. ;;
  1692. pretty|p)
  1693. opt_format="pretty"
  1694. awk:require 4.1.4 || return 1
  1695. ;;
  1696. *)
  1697. err "Invalid value '$opt_format' for option --format"
  1698. echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
  1699. return 1
  1700. esac
  1701. local resources=(c.{memory,network} load_avg)
  1702. if [ -n "${opt_resource}" ]; then
  1703. resources=(${opt_resource//,/ })
  1704. fi
  1705. local not_found=()
  1706. for resource in "${resources[@]}"; do
  1707. if ! fn.exists "stats:$resource"; then
  1708. not_found+=("$resource")
  1709. fi
  1710. done
  1711. if [[ "${#not_found[@]}" -gt 0 ]]; then
  1712. not_found_msg=$(printf "%s, " "${not_found[@]}")
  1713. not_found_msg=${not_found_msg%, }
  1714. err "Unsupported resource(s) provided: ${not_found_msg}"
  1715. echo " resource must be one-of:" >&2
  1716. declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
  1717. return 1
  1718. fi
  1719. :state-dir:
  1720. for resource in "${resources[@]}"; do
  1721. if [ "$opt_format" == "pretty" ]; then
  1722. echo "${WHITE}$resource${NORMAL}:"
  1723. stats:"$resource" "$opt_format" 2>&1 | prefix " "
  1724. else
  1725. stats:"$resource" "$opt_format" 2>&1 | prefix "$resource "
  1726. fi
  1727. set_errlvl "${PIPESTATUS[0]}" || return 1
  1728. done
  1729. }
stats:c.memory() {
    ## Sample memory usage of all running containers, record it in the
    ## "containers" RRD, then render it according to $1.
    ##
    ## $1: 'raw'/'r', 'pretty'/'p' (default); any other value (e.g.
    ##     'silent') records without displaying anything.
    local format="$1"
    local out
    ## NOTE(review): 'container_to_check' is not declared local and
    ## leaks into the caller's scope — confirm this is intentional.
    container_to_check=($(docker:running_containers)) || exit 1
    out=$(docker:containers:stats "${container_to_check[@]}")
    ## Store columns 3 (usage) and 4 (inactive) as GAUGE data sources.
    printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            ## name + first 4 numeric columns, unformatted
            printf "%s\n" "$out" | cut -f 1-5 -d " "
            ;;
        pretty|p)
            ## strftime("%z") in the awk program below is gawk-only.
            awk:require 4.1.4 || return 1
            {
                ## Header row: '_' is padding, turned into spaces by
                ## header:make after column normalization.
                echo "container" "__total____" "buffered____" "resident____"
                ## Drop the timestamp column (keep $1 then from $3 on).
                ## NOTE(review): 'offset' is computed but never used —
                ## looks like dead code; confirm before removing.
                printf "%s\n" "$out" |
                    awk '
                    {
                      offset = strftime("%z", $2);
                      print $1, substr($0, index($0,$3));
                    }' | cut -f 1-4 -d " " |
                    numfmt --field 2-4 --to=iec-i --format=%8.1fB |
                    sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
                    sort
                ## the sed above glues unit to number ("1.2:KiB") so
                ## col:normalize:size sees them as one column
            } | col:normalize:size -+++ |
                sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
                header:make
            ;;
    esac
}
stats:c.network() {
    ## Sample network counters (rx/tx bytes, packets, errors, dropped)
    ## of all running containers, record them in the "containers" RRD,
    ## then render them according to $1.
    ##
    ## $1: 'raw'/'r', 'pretty'/'p' (default); any other value (e.g.
    ##     'silent') records without displaying anything.
    local format="$1"
    local out
    ## NOTE(review): 'container_to_check' is not declared local and
    ## leaks into the caller's scope — confirm this is intentional.
    container_to_check=($(docker:running_containers)) || exit 1
    out=$(docker:containers:stats "${container_to_check[@]}")
    cols=(
        {rx,tx}_{bytes,packets,errors,dropped}
    )
    idx=5 ## starting column idx for next fields
    ## Build one "<col-idx>:<name>:COUNTER:U:U" data-source definition
    ## per counter column.
    defs=()
    for col in "${cols[@]}"; do
        defs+=("$((idx++)):${col}:COUNTER:U:U")
    done
    ## Join defs with ',': assignments in a single simple command are
    ## applied left to right, so IFS is already "," when "${defs[*]}"
    ## gets joined; IFS is then restored.
    OLDIFS="$IFS"
    IFS="," defs="${defs[*]}"
    IFS="$OLDIFS"
    printf "%s\n" "$out" |
        rrd:update "containers" \
            "network|${defs}" || {
        return 1
    }
    case "${format:-p}" in
        raw|r)
            ## name, timestamp, then the 8 network counters
            printf "%s\n" "$out" | cut -f 1,2,7- -d " "
            ;;
        pretty|p)
            ## strftime("%z") in the awk program below is gawk-only.
            awk:require 4.1.4 || return 1
            {
                ## Two header rows ('_' is padding turned into spaces
                ## by header:make after column normalization).
                echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
                echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
                ## Drop columns 2-6 (keep $1 then from $7 on).
                ## NOTE(review): 'offset' is computed but never used —
                ## looks like dead code; confirm before removing.
                printf "%s\n" "$out" |
                    awk '
                    {
                      offset = strftime("%z", $2);
                      print $1, substr($0, index($0,$7));
                    }' |
                    numfmt --field 2,6 --to=iec-i --format=%8.1fB |
                    numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
                    sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
                    sort
                ## the sed above glues unit to number ("1.2:KiB") so
                ## col:normalize:size sees them as one column
            } | col:normalize:size -++++++++ |
                sed -r '
                   s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
                   s/(\.[0-9]):([KMGTPE])/\1 \2/g;
                   s/ ([0-9]+)\.0:B/\1 /g;
                   s/ ([0-9]+)\.0:/\1 /g;
                ' |
                header:make 2
            ;;
    esac
}
  1812. header:make() {
  1813. local nb_line="${1:-1}"
  1814. local line
  1815. while ((nb_line-- > 0)); do
  1816. read-0a line
  1817. echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
  1818. done
  1819. cat
  1820. }
  1821. stats:load_avg() {
  1822. local format="$1"
  1823. local out
  1824. out=$(host:sys:load_avg)
  1825. printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
  1826. return 1
  1827. }
  1828. case "${format:-p}" in
  1829. raw|r)
  1830. printf "%s\n" "$out" | cut -f 2-5 -d " "
  1831. ;;
  1832. pretty|p)
  1833. {
  1834. echo "___1m" "___5m" "__15m"
  1835. printf "%s\n" "$out" | cut -f 3-5 -d " "
  1836. } | col:normalize:size +++ | header:make
  1837. ;;
  1838. esac
  1839. }
  1840. host:sys:load_avg() {
  1841. local uptime
  1842. uptime="$(uptime)"
  1843. uptime=${uptime##*: }
  1844. uptime=${uptime//,/}
  1845. printf "%s " "" "$(date +%s)" "$uptime"
  1846. }
cmdline.spec.gnu mongo
cmdline.spec::cmd:mongo:run() {
    ## Intentional no-op: 'mongo' only serves as a command group so
    ## that subcommands (e.g. 'upgrade' below) can attach to it.
    :
}
  1851. cmdline.spec.gnu upgrade
  1852. cmdline.spec:mongo:cmd:upgrade:run() {
  1853. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  1854. : :optval: --service,-s "The mongo service name (defaults to 'mongo')"
  1855. : :optfla: --debug,-d "Display debugging information"
  1856. local URL
  1857. mongo_service="${opt_service:-mongo}"
  1858. available_actions=$(compose --get-available-actions) || exit 1
  1859. available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
  1860. if [[ " ${available_actionable_services[*]} " != *" $mongo_service "* ]]; then
  1861. err "Service '$mongo_service' was not found in current 'compose.yml'."
  1862. exit 1
  1863. fi
  1864. opts_compose=()
  1865. if [ -n "$opt_debug" ]; then
  1866. opts_compose+=("--debug")
  1867. else
  1868. opts_compose+=("-q")
  1869. fi
  1870. project_name=$(compose:project_name) || exit 1
  1871. containers="$(compose:service:containers "${project_name}" "${mongo_service}")" || exit 1
  1872. ## XXXvlab: quick hack, to make more beautiful later
  1873. cron_container=$(compose:service:containers "${project_name}" "cron")
  1874. containers="$containers $cron_container"
  1875. docker stop "$cron_container" >/dev/null 2>&1 || true
  1876. before_version=
  1877. uptodate=
  1878. upgraded=
  1879. msgerr=()
  1880. while read-0a-err errlvl line; do
  1881. echo "$line"
  1882. rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
  1883. case "$rline" in
  1884. "II Current mongo version: "*)
  1885. before_version="${rline#II Current mongo version: }"
  1886. ;;
  1887. "II ${mongo_service} is already up-to-date.")
  1888. if [ -z "$before_version" ]; then
  1889. msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
  1890. continue
  1891. fi
  1892. after_version="$before_version"
  1893. uptodate=1
  1894. ;;
  1895. "II Successfully upgraded from ${before_version} to "*)
  1896. after_version="${rline#II Successfully upgraded from ${before_version} to }"
  1897. upgraded=1
  1898. ;;
  1899. *)
  1900. :
  1901. ;;
  1902. esac
  1903. done < <(
  1904. ## -q to remove the display of ``compose`` related information
  1905. ## like relation resolution.
  1906. ## -c on the upgrade action to force color
  1907. ansi_color=yes p-0a-err compose -c "${opts_compose[@]}" upgrade "$mongo_service" --no-hint -c "$TARGET_VERSION"
  1908. )
  1909. if [ "$errlvl" != 0 ]; then
  1910. exit "$errlvl"
  1911. fi
  1912. if [ -n "$uptodate" ]; then
  1913. for container in "${containers[@]}"; do
  1914. [ -n "$container" ] || continue
  1915. Wrap -d "start ${DARKYELLOW}${mongo_service}${NORMAL}'s container" -- \
  1916. docker start "$container" || {
  1917. err "Failed to start container '$container'."
  1918. exit 1
  1919. }
  1920. done
  1921. exit 0
  1922. fi
  1923. if [ -z "$upgraded" ]; then
  1924. err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
  1925. exit 1
  1926. fi
  1927. desc="update \`compose.yml\` to set ${DARKYELLOW}$mongo_service${NORMAL}'s "
  1928. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1929. Wrap -d "$desc" -- \
  1930. compose:file:value-change \
  1931. "${mongo_service}.docker-compose.image" \
  1932. "docker.0k.io/mongo:${after_version}-myc" || exit 1
  1933. echo "${WHITE}Launching final compose${NORMAL}"
  1934. compose up || exit 1
  1935. }
  1936. cmdline.spec.gnu bench
  1937. cmdline.spec::cmd:bench:run() {
  1938. depends sysbench
  1939. nbthread=$(lscpu | egrep "^CPU\(s\):" | cut -f 2 -d : | xargs echo)
  1940. single=$(sysbench cpu --cpu-max-prime=20000 run --threads=1 | grep "events per" | cut -f 2 -d : | xargs echo)
  1941. threaded=$(sysbench cpu --cpu-max-prime=20000 run --threads="$nbthread" | grep "events per" | cut -f 2 -d : | xargs echo)
  1942. echo "$threaded / $single / $nbthread"
  1943. }
  1944. cmdline::parse "$@"