  1. #!/bin/bash
  2. . /etc/shlib >/dev/null 2>&1 || {
  3. echo "Error: you don't have kal-shlib-core installed."
  4. echo ""
  5. echo " You might want to add the \`deb.kalysto.org\` deb repository; you'll need root access,"
  6. echo " so you might want to run these commands after a \`sudo -i\` for instance..."
  7. echo ""
  8. echo " echo deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main \\"
  9. echo " > /etc/apt/sources.list.d/kalysto.org.list"
  10. echo " wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add -"
  11. echo " apt-get update -o Dir::Etc::sourcelist=sources.list.d/kalysto.org.list \\"
  12. echo " -o Dir::Etc::sourceparts=- -o APT::Get::List-Cleanup=0"
  13. echo ""
  14. echo " Then install package kal-shlib-*:"
  15. echo ""
  16. echo " apt install kal-shlib-{common,cmdline,config,cache,docker,pretty}"
  17. echo ""
  18. exit 1
  19. } >&2
  20. include common
  21. include parse
  22. include cmdline
  23. include config
  24. include cache
  25. include fn
  26. include docker
  27. [[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
  28. version=0.1
  29. desc='Install backup'
  30. help=""
  31. version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
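  ## Illustrative note (not from the original source): `version_gt 2.10 2.9` returns 0 (true),
  ## while `version_gt 2.9 2.9` returns 1, as the comparison is strict.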
  32. read-0a-err() {
  33. local ret="$1" eof="" idx=0 last=
  34. read -r -- "${ret?}" <<<"0"
  35. shift
  36. while [ "$1" ]; do
  37. last=$idx
  38. read -r -- "$1" || {
  39. ## Put this last value in ${!ret}
  40. eof="$1"
  41. read -r -- "$ret" <<<"${!eof}"
  42. break
  43. }
  44. ((idx++))
  45. shift
  46. done
  47. [ -z "$eof" ] || {
  48. if [ "$last" != 0 ]; then
  49. echo "Error: read-0a-err couldn't fill all values" >&2
  50. read -r -- "$ret" <<<"127"
  51. else
  52. if [ -z "${!ret}" ]; then
  53. echo "Error: last value is not a number, did you finish with an errorlevel?" >&2
  54. read -r -- "$ret" <<<"126"
  55. fi
  56. fi
  57. false
  58. }
  59. }
  60. p-0a-err() {
  61. "$@"
  62. echo -n "$?"
  63. }
  64. docker:running-container-projects() {
  65. :cache: scope=session
  66. docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
  67. }
  68. decorator._mangle_fn docker:running-container-projects
  69. ssh:mk-private-key() {
  70. local host="$1" service_name="$2"
  71. (
  72. settmpdir VPS_TMPDIR
  73. ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
  74. cat "$VPS_TMPDIR/rsync_rsa"
  75. )
  76. }
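  ## Illustrative usage (hypothetical host/service names): generate a throw-away RSA key pair in a
  ## temporary directory and capture the private key from stdout, e.g.:
  ##   private_key=$(ssh:mk-private-key "vps.example.org" "rsync-backup")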
  77. mailcow:has-images-running() {
  78. local images
  79. images=$(docker ps --format '{{.Image}}' | sort | uniq)
  80. [[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
  81. }
  82. mailcow:has-container-project-mentionning-mailcow() {
  83. local projects
  84. projects=$(docker:running-container-projects) || return 1
  85. [[ $'\n'"$projects"$'\n' == *mailcow* ]]
  86. }
  87. mailcow:has-running-containers() {
  88. mailcow:has-images-running ||
  89. mailcow:has-container-project-mentionning-mailcow
  90. }
  91. mailcow:get-root() {
  92. :cache: scope=session
  93. local dir
  94. for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
  95. [ -d "$dir" ] || continue
  96. [ -r "$dir/mailcow.conf" ] || continue
  97. echo "$dir"
  98. return 0
  99. done
  100. return 1
  101. }
  102. decorator._mangle_fn mailcow:get-root
  103. compose:get-compose-yml() {
  104. :cache: scope=session
  105. local path
  106. path=$(DEBUG=1 DRY_RUN=1 compose 2>&1 | egrep '^\s+-e HOST_COMPOSE_YML_FILE=' | cut -f 2- -d "=" | cut -f 1 -d " ")
  107. [ -e "$path" ] || return 1
  108. echo "$path"
  109. }
  110. decorator._mangle_fn compose:get-compose-yml
  111. export -f compose:get-compose-yml
  112. compose:has-container-project-myc() {
  113. local projects
  114. projects=$(docker:running-container-projects) || return 1
  115. [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
  116. }
  117. compose:file:value-change() {
  118. local key="$1" value="$2"
  119. local compose_yml
  120. if ! compose_yml=$(compose:get-compose-yml); then
  121. err "Couldn't locate your 'compose.yml' file."
  122. return 1
  123. fi
  124. yaml:file:value-change "$compose_yml" "$key" "$value" || return 1
  125. }
  126. export -f compose:file:value-change
  127. yaml:file:value-change() {
  128. local file="$1" key="$2" value="$3" first=1 count=0 diff=""
  129. (
  130. cd "${file%/*}"
  131. while read-0 hunk; do
  132. if [ -n "$first" ]; then
  133. diff+="$hunk"
  134. first=
  135. continue
  136. fi
  137. if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
  138. ((count++))
  139. diff+="$hunk" >&2
  140. else
  141. :
  142. # echo "discarding:" >&2
  143. # e "$hunk" | prefix " | " >&2
  144. fi
  145. done < <(
  146. export DEBUG=
  147. settmpdir YQ_TEMP
  148. cp "${file}" "$YQ_TEMP/compose.yml" &&
  149. yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
  150. sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
  151. diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
  152. sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
  153. printf "\0"
  154. )
  155. if [[ "$count" == 0 ]]; then
  156. err "No change made to '$file'."
  157. return 1
  158. fi
  159. if [[ "$count" != 1 ]]; then
  160. err "compose file change request seems dubious and was refused:"
  161. e "$diff" | prefix " | " >&2
  162. return 1
  163. fi
  164. echo Applying: >&2
  165. e "$diff" | prefix " | " >&2
  166. patch <<<"$diff"
  167. ) || exit 1
  168. }
  169. export -f yaml:file:value-change
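  ## Illustrative usage (hypothetical file and key): set a dotted key with yq, then only apply the
  ## diff hunk touching that key as a patch, e.g.:
  ##   yaml:file:value-change /srv/compose.yml "rsync-backup.options.target" "backup.example.org:10023"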
  170. type:is-mailcow() {
  171. mailcow:get-root >/dev/null ||
  172. mailcow:has-running-containers
  173. }
  174. type:is-compose() {
  175. compose:get-compose-yml >/dev/null &&
  176. compose:has-container-project-myc
  177. }
  178. vps:get-type() {
  179. :cache: scope=session
  180. local fn
  181. for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
  182. "$fn" && {
  183. echo "${fn#type:is-}"
  184. return 0
  185. }
  186. done
  187. return 1
  188. }
  189. decorator._mangle_fn vps:get-type
  190. mirror-dir:sources() {
  191. :cache: scope=session
  192. if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
  193. err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
  194. return 1
  195. fi
  196. }
  197. decorator._mangle_fn mirror-dir:sources
  198. mirror-dir:check-add() {
  199. local elt="$1" sources
  200. sources=$(mirror-dir:sources) || return 1
  201. if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
  202. info "Volume $elt already in sources"
  203. else
  204. Elt "Adding directory $elt"
  205. sed -i "/sources:/a\ - \"${elt}\"" \
  206. /etc/mirror-dir/config.yml
  207. Feedback || return 1
  208. fi
  209. }
  210. mirror-dir:check-add-vol() {
  211. local elt="$1"
  212. mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
  213. }
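  ## Illustrative usage (hypothetical volume name): mirror-dir:check-add-vol "postfix" would register
  ## "/var/lib/docker/volumes/*_postfix-*/_data" as a source in /etc/mirror-dir/config.yml.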
  214. ## The first colon is to prevent auto-export of the function from shlib
  215. : ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
  216. export BASH_BUG_5=1 && unset -f bash-bug-5
  217. wrap() {
  218. local label="$1" code="$2"
  219. shift 2
  220. export VERBOSE=1
  221. interpreter=/bin/bash
  222. if [ -n "$BASH_BUG_5" ]; then
  223. (
  224. settmpdir tmpdir
  225. fname=${label##*/}
  226. e "$code" > "$tmpdir/$fname" &&
  227. chmod +x "$tmpdir/$fname" &&
  228. Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
  229. )
  230. else
  231. Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
  232. fi
  233. }
  234. ping_check() {
  235. #global ignore_ping_check
  236. local host="$1"
  237. ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
  238. head -n 1 | cut -f 1 -d " ") || return 1
  239. my_ip=$(curl -s myip.kal.fr)
  240. if [ "$ip" != "$my_ip" ]; then
  241. if [ -n "$ignore_ping_check" ]; then
  242. warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
  243. else
  244. err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
  245. return 1
  246. fi
  247. fi
  248. }
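  ## Illustrative usage: ping_check "vps.example.org" fails when the host's resolved IP differs from
  ## this machine's public IP, unless ignore_ping_check is set (via the --ignore-ping-check option).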
  249. mailcow:install-backup() {
  250. local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
  251. ## find installation
  252. mailcow_root=$(mailcow:get-root) || {
  253. err "Couldn't find a valid mailcow root directory."
  254. return 1
  255. }
  256. ## check ok
  257. DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
  258. err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
  259. return 1
  260. }
  261. ping_check "$DOMAIN" || return 1
  262. MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
  263. err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
  264. return 1
  265. }
  266. if docker compose >/dev/null 2>&1; then
  267. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
  268. else
  269. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
  270. fi
  271. container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
  272. if [ -z "$container_id" ]; then
  273. err "Couldn't find docker container named '$MYSQL_CONTAINER'."
  274. return 1
  275. fi
  276. export KEY_BACKUP_ID="mailcow"
  277. export MYSQL_ROOT_PASSWORD
  278. export MYSQL_CONTAINER
  279. export BACKUP_SERVER
  280. export DOMAIN
  281. wrap "Install rsync-backup on host" "
  282. cd /srv/charm-store/rsync-backup
  283. bash ./hooks/install.d/60-install.sh
  284. " || return 1
  285. wrap "Mysql dump install" "
  286. cd /srv/charm-store/mariadb
  287. bash ./hooks/install.d/60-backup.sh
  288. " || return 1
  289. ## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
  290. for elt in "vmail{,-attachments-vol}" crypt redis rspamd postfix; do
  291. mirror-dir:check-add-vol "$elt" || return 1
  292. done
  293. mirror-dir:check-add "$mailcow_root" || return 1
  294. mirror-dir:check-add "/var/backups/mysql" || return 1
  295. mirror-dir:check-add "/etc" || return 1
  296. dest="$BACKUP_SERVER"
  297. dest="${dest%/*}"
  298. ssh_options=()
  299. if [[ "$dest" == *":"* ]]; then
  300. port="${dest##*:}"
  301. dest="${dest%%:*}"
  302. ssh_options=(-p "$port")
  303. else
  304. port=""
  305. dest="${dest%%:*}"
  306. fi
  307. info "You can run the following command from a host having admin access to $dest:"
  308. echo " (Or send it to a backup admin of $dest)" >&2
  309. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
  310. }
  311. compose:has_domain() {
  312. local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
  313. while read-0 name conf ; do
  314. name=$(e "$name" | shyaml get-value)
  315. if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
  316. [ "$host" == "$name" ] && return 0
  317. fi
  318. rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
  319. for relation in web-proxy publish-dir; do
  320. relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
  321. while read-0 label conf_relation; do
  322. domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
  323. [ "$host" == "$domain" ] && return 0
  324. }
  325. server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
  326. [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
  327. }
  328. done < <(e "$relation_value" | shyaml -y key-values-0)
  329. done
  330. done < <(shyaml -y key-values-0 < "$compose_file")
  331. return 1
  332. }
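  ## Illustrative usage (hypothetical arguments): compose:has_domain /srv/compose.yml "www.example.org"
  ## succeeds if the domain appears as a service name, as a web-proxy/publish-dir 'domain', or among
  ## its 'server-aliases'.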
  333. compose:install-backup() {
  334. local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
  335. ## XXXvlab: far from perfect as it mimics and depends on the internal
  336. ## logic of the current default way to get a domain in compose-core
  337. host=$(hostname)
  338. if ! compose:has_domain "$compose_file" "$host"; then
  339. if [ -n "$ignore_domain_check" ]; then
  340. warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
  341. else
  342. err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
  343. return 1
  344. fi
  345. fi
  346. ping_check "$host" || return 1
  347. if [ -e "/root/.ssh/rsync_rsa" ]; then
  348. warn "deleting private key in /root/.ssh/rsync_rsa, as we are not using it anymore."
  349. rm -fv /root/.ssh/rsync_rsa
  350. fi
  351. if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
  352. warn "deleting public key in /root/.ssh/rsync_rsa.pub, as we are not using it anymore."
  353. rm -fv /root/.ssh/rsync_rsa.pub
  354. fi
  355. if service_cfg=$(cat "$compose_file" |
  356. shyaml get-value -y "$service_name" 2>/dev/null); then
  357. info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
  358. "is already present in '$compose_file'."
  359. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  360. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  361. "entry in '$compose_file'."
  362. return 1
  363. }
  364. private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
  365. target=$(e "$cfg" | shyaml get-value target) || return 1
  366. if [ "$target" != "$BACKUP_SERVER" ]; then
  367. err "Existing backup target '$target' is different" \
  368. "from specified '$BACKUP_SERVER'"
  369. return 1
  370. fi
  371. else
  372. private_key=$(ssh:mk-private-key "$host" "$service_name")
  373. cat <<EOF >> "$compose_file"
  374. $service_name:
  375. options:
  376. ident: $host
  377. target: $BACKUP_SERVER
  378. private-key: |
  379. $(e "$private_key" | sed -r 's/^/ /g')
  380. EOF
  381. fi
  382. dest="$BACKUP_SERVER"
  383. dest="${dest%/*}"
  384. ssh_options=()
  385. if [[ "$dest" == *":"* ]]; then
  386. port="${dest##*:}"
  387. dest="${dest%%:*}"
  388. ssh_options=(-p "$port")
  389. else
  390. port=""
  391. dest="${dest%%:*}"
  392. fi
  393. info "You can run the following command from a host having admin access to $dest:"
  394. echo " (Or send it to a backup admin of $dest)" >&2
  395. ## We remove the trailing label (the label may or may not be added to the
  396. ## private key, and thus appear here, depending on the version of
  397. ## openssh-client)
  398. public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
  399. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
  400. }
  401. backup-action() {
  402. local action="$1"
  403. shift
  404. vps_type=$(vps:get-type) || {
  405. err "Failed to get type of installation."
  406. return 1
  407. }
  408. if ! fn.exists "${vps_type}:${action}"; then
  409. err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
  410. return 1
  411. fi
  412. "${vps_type}:${action}" "$@"
  413. }
  414. compose:get_default_backup_host_ident() {
  415. local service_name="$1" ## Optional
  416. local compose_file service_cfg cfg target
  417. compose_file=$(compose:get-compose-yml)
  418. service_name="${service_name:-rsync-backup}"
  419. if ! service_cfg=$(cat "$compose_file" |
  420. shyaml get-value -y "$service_name" 2>/dev/null); then
  421. err "No service named '$service_name' found in 'compose.yml'."
  422. return 1
  423. fi
  424. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  425. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  426. "entry in '$compose_file'."
  427. return 1
  428. }
  429. if ! target=$(e "$cfg" | shyaml get-value target); then
  430. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  431. "entry in '$compose_file'."
  432. fi
  437. if ! ident=$(e "$cfg" | shyaml get-value ident); then
  438. err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  439. "entry in '$compose_file'."
  440. fi
  441. echo "$target $ident"
  442. }
  443. mailcow:get_default_backup_host_ident() {
  444. local content cron_line ident found dest cmd_line
  445. if ! [ -e "/etc/cron.d/mirror-dir" ]; then
  446. err "No '/etc/cron.d/mirror-dir' found."
  447. return 1
  448. fi
  449. content=$(cat /etc/cron.d/mirror-dir) || {
  450. err "Can't read '/etc/cron.d/mirror-dir'."
  451. return 1
  452. }
  453. if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
  454. err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
  455. return 1
  456. fi
  457. cron_line=${cron_line%|*}
  458. cmd_line=(${cron_line#*root})
  459. found=
  460. dest=
  461. for arg in "${cmd_line[@]}"; do
  462. [ -n "$found" ] && {
  463. dest="$arg"
  464. break
  465. }
  466. [ "$arg" == "-d" ] && {
  467. found=1
  468. }
  469. done
  470. if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
  471. err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  472. return 1
  473. fi
  474. if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
  475. ## unquoting, the eval should be safe because of previous check
  476. dest=$(eval e "$dest")
  477. fi
  478. if [ -z "$dest" ]; then
  479. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  480. return 1
  481. fi
  482. ## looking for ident
  483. found=
  484. ident=
  485. for arg in "${cmd_line[@]}"; do
  486. [ -n "$found" ] && {
  487. ident="$arg"
  488. break
  489. }
  490. [ "$arg" == "-h" ] && {
  491. found=1
  492. }
  493. done
  494. if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
  495. err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  496. return 1
  497. fi
  498. if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
  499. ## unquoting, the eval should be safe because of previous check
  500. ident=$(eval e "$ident")
  501. fi
  502. if [ -z "$ident" ]; then
  503. err "Can't find identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  504. return 1
  505. fi
  506. echo "$dest $ident"
  507. }
  508. compose:service:containers() {
  509. local project="$1" service="$2"
  510. docker ps \
  511. --filter label="com.docker.compose.project=$project" \
  512. --filter label="compose.master-service=$service" \
  513. --format="{{.ID}}"
  514. }
  515. export -f compose:service:containers
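  ## Illustrative usage (hypothetical project/service): compose:service:containers "myc" "rsync-backup"
  ## prints the IDs of running containers labeled with that compose project and master-service.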
  516. compose:service:container_one() {
  517. local project="$1" service="$2" container_id
  518. {
  519. read-0a container_id || {
  520. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  521. return 1
  522. }
  523. if read-0a _; then
  524. err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  525. return 1
  526. fi
  527. } < <(compose:service:containers "$project" "$service")
  528. echo "$container_id"
  529. }
  530. export -f compose:service:container_one
  531. compose:service:container_first() {
  532. local project="$1" service="$2" container_id
  533. {
  534. read-0a container_id || {
  535. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  536. return 1
  537. }
  538. if read-0a _; then
  539. warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  540. fi
  541. } < <(compose:service:containers "$project" "$service")
  542. echo "$container_id"
  543. }
  544. export -f compose:service:container_first
  545. docker:running_containers() {
  546. :cache: scope=session
  547. docker ps --format="{{.ID}}"
  548. }
  549. decorator._mangle_fn docker:running_containers
  550. export -f docker:running_containers
  551. compose:project:containers() {
  552. local project="$1" opts
  553. opts+=(--filter label="com.docker.compose.project=$project")
  554. docker ps "${opts[@]}" \
  555. --format="{{.ID}}"
  556. }
  557. export -f compose:project:containers
  558. compose:charm:containers() {
  559. local project="$1" charm="$2"
  560. docker ps \
  561. --filter label="com.docker.compose.project=$project" \
  562. --filter label="compose.charm=$charm" \
  563. --format="{{.ID}}"
  564. }
  565. export -f compose:charm:containers
  566. compose:charm:container_one() {
  567. local project="$1" charm="$2" container_id
  568. {
  569. read-0a container_id || {
  570. err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
  571. return 1
  572. }
  573. if read-0a _; then
  574. err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
  575. return 1
  576. fi
  577. } < <(compose:charm:containers "$project" "$charm")
  578. echo "$container_id"
  579. }
  580. export -f compose:charm:container_one
  581. compose:charm:container_first() {
  582. local project="$1" charm="$2" container_id
  583. {
  584. read-0a container_id || {
  585. warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
  586. }
  587. if read-0a _; then
  588. warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
  589. fi
  590. } < <(compose:charm:containers "$project" "$charm")
  591. echo "$container_id"
  592. }
  593. export -f compose:charm:container_first
  594. compose:get_url() {
  595. local project_name="$1" service="$2" data_file network ip
  596. data_dir=("/var/lib/compose/relations/${project_name}/${service}-"*"/web-proxy")
  597. if [ "${#data_dir[@]}" -gt 1 ]; then
  598. err "More than one web-proxy relation." \
  599. "Current 'vps' algorithm is insufficient" \
  600. "to figure out which relation is concerned"
  601. return 1
  602. fi
  603. data_file="${data_dir[0]}/data"
  604. if [ -d "${data_file%/*}" ]; then
  605. (
  606. set -o pipefail
  607. ## users can't directly cat the content
  608. docker run --rm \
  609. -v "${data_file%/*}":/tmp/dummy alpine \
  610. cat "/tmp/dummy/${data_file##*/}" |
  611. shyaml get-value url
  612. )
  613. else
  614. ## Assume there is no frontend relation here; the url is the direct IP
  615. container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
  616. network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
  617. IFS=":" read -r network ip <<<"$network_ip"
  618. tcp_port=
  619. for port in $(docker:exposed_ports "$container_id"); do
  620. IFS="/" read port type <<<"$port"
  621. [ "$type" == "tcp" ] || continue
  622. tcp_port="$port"
  623. break
  624. done
  625. echo -n "http://$ip"
  626. [ -n "$tcp_port" ] && echo ":$tcp_port"
  627. fi || {
  628. err "Failed querying ${service}'s frontend relation to get url."
  629. return 1
  630. }
  631. }
  632. export -f compose:get_url
  633. compose:container:service() {
  634. local container="$1" service
  635. if ! service=$(docker:container:label "$container" "compose.service"); then
  636. err "Failed to get service name from container ${container}."
  637. return 1
  638. fi
  639. if [ -z "$service" ]; then
  640. err "No service found for container ${container}."
  641. return 1
  642. fi
  643. echo "$service"
  644. }
  645. export -f compose:container:service
  646. compose:psql() {
  647. local project_name="$1" dbname="$2" container_id
  648. shift 2
  649. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  650. docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
  651. }
  652. export -f compose:psql
  653. compose:mongo() {
  654. local project_name="$1" dbname="$2" container_id
  655. container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
  656. docker exec -i "${container_id}" mongo --quiet "$dbname"
  657. }
  658. export -f compose:mongo
  659. compose:pgm() {
  660. local project_name="$1" container_network_ip container_ip container_network
  661. shift
  662. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  663. service_name=$(compose:container:service "$container_id") || return 1
  664. image_id=$(docker:container:image "$container_id") || return 1
  665. container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
  666. IFS=":" read -r container_network container_ip <<<"$container_network_ip"
  667. pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
  668. local final_pgm_docker_run_opts+=(
  669. -u 0 -e prefix_pg_local_command=" "
  670. --network "${container_network}"
  671. -e PGHOST="$container_ip"
  672. -e PGUSER=postgres
  673. -v "$pgpass:/root/.pgpass"
  674. "${pgm_docker_run_opts[@]}"
  675. )
  676. cmd=(docker run --rm \
  677. "${final_pgm_docker_run_opts[@]}" \
  678. "${image_id}" pgm "$@"
  679. )
  680. echo "${cmd[@]}"
  681. "${cmd[@]}"
  682. }
  683. export -f compose:pgm
  684. postgres:dump() {
  685. local project_name="$1" src="$2" dst="$3"
  686. (
  687. settmpdir PGM_TMP_LOCATION
  688. pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
  689. compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
  690. mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
  691. ) || return 1
  692. }
  693. export -f postgres:dump
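  ## Illustrative usage (hypothetical names/paths): postgres:dump "myc" "cyclos" "/var/backups/cyclos.gz"
  ## dumps the 'cyclos' database through the project's postgres charm container into a local gzip file.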
  694. postgres:restore() {
  695. local project_name="$1" src="$2" dst="$3"
  696. full_src_path=$(readlink -e "$src") || exit 1
  697. (
  698. pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
  699. compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
  700. ) || return 1
  701. }
  702. export -f postgres:restore
  703. odoo:get_public_user_id() {
  704. local project_name="$1" dbname="$2"
  705. echo "select res_id from ir_model_data where model = 'res.users' and name = 'public_user';" |
  706. compose:psql "$project_name" "$dbname" -qAt
  707. }
  708. cyclos:set_root_url() {
  709. local project_name="$1" dbname="$2" url="$3"
  710. echo "UPDATE configurations SET root_url = '$url';" |
  711. compose:psql "$project_name" "$dbname" || {
  712. err "Failed to set cyclos url value in '$dbname' database."
  713. return 1
  714. }
  715. }
  716. export -f cyclos:set_root_url
  717. cyclos:unlock() {
  718. local project_name="$1" dbname="$2"
  719. echo "delete from database_lock;" |
  720. compose:psql "${project_name}" "${dbname}"
  721. }
  722. export -f cyclos:unlock
  723. rocketchat:drop-indexes() {
  724. local project_name="$1" dbname="$2"
  725. echo "db.users.dropIndexes()" |
  726. compose:mongo "${project_name}" "${dbname}"
  727. }
  728. export -f rocketchat:drop-indexes
  729. compose:project_name() {
  730. if [ -z "$PROJECT_NAME" ]; then
  731. PROJECT_NAME=$(compose --get-project-name) || {
  732. err "Couldn't get project name."
  733. return 1
  734. }
  735. if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
  736. err "Couldn't get project name, probably because 'compose.yml' wasn't found."
  737. echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
  738. echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
  739. return 1
  740. fi
  741. export PROJECT_NAME
  742. fi
  743. echo "$PROJECT_NAME"
  744. }
  745. export -f compose:project_name
  746. compose:get_cron_docker_cmd() {
  747. local cron_line cmd_line docker_cmd
  748. project_name=$(compose:project_name) || return 1
  749. if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
  750. err "Can't find cron_line in cron container."
  751. echo " Have you forgotten to run 'compose up'?" >&2
  752. return 1
  753. fi
  754. cron_line=${cron_line%|*}
  755. cron_line=${cron_line%"2>&1"*}
  756. cmd_line="${cron_line#*root}"
  757. eval "args=($cmd_line)"
  758. ## should be last argument
  759. docker_cmd=$(echo ${args[@]: -1})
  760. if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
  761. echo "The docker command found should start with 'docker run --rm -e'." >&2
  762. echo "Here's the command found:" >&2
  763. echo " $docker_cmd" >&2
  764. return 1
  765. fi
  766. e "$docker_cmd"
  767. }
  768. compose:recover-target() {
  769. local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
  770. project_name=$(compose:project_name) || return 1
  771. docker_image="${project_name}_${service_name}"
  772. if ! docker_has_image "$docker_image"; then
  773. compose build "${service_name}" || {
  774. err "Couldn't find nor build image for service '$service_name'."
  775. return 1
  776. }
  777. fi
  778. dst="${dst%/}" ## remove final slash
  779. ssh_options=(-o StrictHostKeyChecking=no)
  780. if [[ "$backup_host" == *":"* ]]; then
  781. port="${backup_host##*:}"
  782. backup_host="${backup_host%%:*}"
  783. ssh_options+=(-p "$port")
  784. else
  785. port=""
  786. backup_host="${backup_host%%:*}"
  787. fi
  788. rsync_opts=(
  789. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  790. -azvArH --delete --delete-excluded
  791. --partial --partial-dir .rsync-partial
  792. --numeric-ids
  793. )
  794. if [ "$DRY_RUN" ]; then
  795. rsync_opts+=("-n")
  796. fi
  797. cmd=(
  798. docker run --rm --entrypoint rsync \
  799. -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
  800. -v "${dst%/*}":/mnt/dest \
  801. "$docker_image" \
  802. "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
  803. )
  804. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  805. "${cmd[@]}"
  806. }
  807. mailcow:recover-target() {
  808. local backup_host="$1" ident="$2" src="$3" dst="$4"
  809. dst="${dst%/}" ## remove final slash
  810. ssh_options=(-o StrictHostKeyChecking=no)
  811. if [[ "$backup_host" == *":"* ]]; then
  812. port="${backup_host##*:}"
  813. backup_host="${backup_host%%:*}"
  814. ssh_options+=(-p "$port")
  815. else
  816. port=""
  817. backup_host="${backup_host%%:*}"
  818. fi
  819. rsync_opts=(
  820. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  821. -azvArH --delete --delete-excluded
  822. --partial --partial-dir .rsync-partial
  823. --numeric-ids
  824. )
  825. if [ "$DRY_RUN" ]; then
  826. rsync_opts+=("-n")
  827. fi
  828. cmd=(
  829. rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
  830. )
  831. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  832. "${cmd[@]}"
  833. }
  834. nextcloud:src:version() {
  835. local version
  836. if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
  837. err "Can't find version.php file to get the last installed version."
  838. exit 1
  839. fi
  840. version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
  841. if [ -z "$version" ]; then
  842. err "Can't figure out version from version.php content."
  843. exit 1
  844. fi
  845. echo "$version"
  846. }
  847. container:health:check-fix:container-aliveness() {
  848. local container_id="$1"
  849. timeout 5s docker inspect "$container_id" >/dev/null 2>&1
  850. errlvl=$?
  851. if [ "$errlvl" == 124 ]; then
  852. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  853. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  854. pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim |
  855. grep "/$container_id" |
  856. sed -r 's/^ *//g' |
  857. cut -f 1 -d " ")
  858. if [ -z "$pid" ]; then
  859. err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer the 'inspect' command. Can't find its PID either."
  860. return 1
  861. fi
  862. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer the 'inspect' command (pid: $pid)."
  863. Wrap -d "kill pid $pid and restart" <<EOF
  864. kill "$pid"
  865. sleep 2
  866. docker restart "$container_id"
  867. EOF
  868. fi
  869. return $errlvl
  870. }
  871. container:health:check-fix:no-matching-entries() {
  872. local container_id="$1"
  873. out=$(docker exec "$container_id" echo 2>&1)
  874. errlvl=$?
  875. [ "$errlvl" == 0 ] && return 0
  876. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  877. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  878. if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
  879. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
  880. Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
  881. docker restart "$container_id"
  882. sleep 2
  883. docker restart "$container_id"
  884. EOF
  885. return $errlvl
  886. fi
  887. warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
  888. echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
  889. echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
  890. echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
  891. return $errlvl
  892. }
  893. docker:api() {
  894. local endpoint="$1"
  895. curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
  896. }
  897. docker:containers:id() {
  898. docker:api /containers/json | jq -r ".[] | .Id"
  899. }
  900. docker:containers:names() {
  901. docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
  902. }
  903. docker:container:stats() {
  904. container="$1"
  905. docker:api "/containers/$container/stats?stream=false"
  906. }
  907. docker:containers:stats() {
  908. :cache: scope=session
  909. local jobs='' line container id_names sha names name data service project
  910. local DC="com.docker.compose"
  911. local PSF_values=(
  912. ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
  913. )
  914. local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
  915. id_names=$(docker ps -a --format="$PSF") || return 1
  916. ## Create a docker container table from name/sha to service, project, image_name
  917. declare -A resolve
  918. while read-0a line; do
  919. sha=${line%% *}; line=${line#* }
  920. names=${line%% *}; line=${line#* }
  921. names=(${names//,/ })
  922. for name in "${names[@]}"; do
  923. resolve["$name"]="$line"
  924. done
  925. resolve["$sha"]="$line"
  926. done < <(printf "%s\n" "$id_names")
  927. declare -A data
  928. while read-0a line; do
  929. name=${line%% *}; line=${line#* }
  930. ts=${line%% *}; line=${line#* }
  931. resolved="${resolve["$name"]}"
  932. project=${resolved%% *}; resolved=${resolved#* }
  933. service=${resolved%% *}; resolved=${resolved#* }
  934. image_name="$resolved"
  935. if [ -z "$service" ]; then
  936. project="@"
  937. service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
  938. service=${service//\//_}
  939. fi
  940. if [ -n "${data["$project/$service"]}" ]; then
  941. previous=(${data["$project/$service"]})
  942. previous=(${previous[@]:1})
  943. current=($line)
  944. sum=()
  945. i=0; max=${#previous[@]}
  946. while (( i < max )); do
  947. sum+=($((${previous[$i]} + ${current[$i]})))
  948. ((i++))
  949. done
  950. data["$project/$service"]="$ts ${sum[*]}"
  951. else
  952. data["$project/$service"]="$ts $line"
  953. fi
  954. done < <(
  955. for container in "$@"; do
  956. (
  957. docker:container:stats "${container}" |
  958. jq -r '
  959. (.name | ltrimstr("/"))
  960. + " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
  961. + " " + (.memory_stats.usage | tostring)
  962. + " " + (.memory_stats.stats.inactive_file | tostring)
  963. + " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
  964. + " " + (.memory_stats.limit | tostring)
  965. + " " + (.networks.eth0.rx_bytes | tostring)
  966. + " " + (.networks.eth0.rx_packets | tostring)
  967. + " " + (.networks.eth0.rx_errors | tostring)
  968. + " " + (.networks.eth0.rx_dropped | tostring)
  969. + " " + (.networks.eth0.tx_bytes | tostring)
  970. + " " + (.networks.eth0.tx_packets | tostring)
  971. + " " + (.networks.eth0.tx_errors | tostring)
  972. + " " + (.networks.eth0.tx_dropped | tostring)
  973. '
  974. ) &
  975. jobs=1
  976. done
  977. [ -n "$jobs" ] && wait
  978. )
  979. for label in "${!data[@]}"; do
  980. echo "$label ${data[$label]}"
  981. done
  982. }
  983. decorator._mangle_fn docker:containers:stats
  984. export -f docker:containers:stats
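  ## For reference, each output line of docker:containers:stats is, in order:
  ##   "project/service ts mem_usage inactive_file mem_usage-inactive_file mem_limit
  ##    rx_bytes rx_packets rx_errors rx_dropped tx_bytes tx_packets tx_errors tx_dropped"
  ## with values summed over all containers of the same project/service.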
  985. col:normalize:size() {
  986. local alignment=$1
  987. awk -v alignment="$alignment" '{
  988. # Store the entire line in the lines array.
  989. lines[NR] = $0;
  990. # Split the line into fields.
  991. split($0, fields);
  992. # Update max for each field.
  993. for (i = 1; i <= length(fields); i++) {
  994. if (length(fields[i]) > max[i]) {
  995. max[i] = length(fields[i]);
  996. }
  997. }
  998. }
  999. END {
  1000. # Print lines with fields padded to max.
  1001. for (i = 1; i <= NR; i++) {
  1002. split(lines[i], fields);
  1003. line = "";
  1004. for (j = 1; j <= length(fields); j++) {
  1005. # Get alignment for the current field.
  1006. align = substr(alignment, j, 1);
  1007. if (align != "+") {
  1008. align = "-"; # Default to left alignment if not "+".
  1009. }
  1010. line = line sprintf("%" align max[j] "s ", fields[j]);
  1011. }
  1012. print line;
  1013. }
  1014. }'
  1015. }
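  ## Illustrative usage: printf "%s\n" "a 10" "bbb 2" | col:normalize:size "-+" pads each column to
  ## its widest value, left-aligning column 1 ("-") and right-aligning column 2 ("+").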
  1016. rrd:create() {
  1017. local prefix="$1"
  1018. shift
  1019. local label="$1" step="300" src_def
  1020. shift
  1021. if [ -z "$VAR_DIR" ]; then
  1022. err "Unset \$VAR_DIR, can't create rrd graph"
  1023. return 1
  1024. fi
  1025. mkdir -p "$VAR_DIR"
  1026. if ! [ -d "$VAR_DIR" ]; then
  1027. err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
  1028. return 1
  1029. fi
  1030. if ! type -p rrdtool >/dev/null 2>&1; then
  1031. apt-get install rrdtool -y --force-yes </dev/null
  1032. if ! type -p rrdtool >/dev/null 2>&1; then
  1033. err "Couldn't find nor install 'rrdtool'."
  1034. return 1
  1035. fi
  1036. fi
  1037. local RRD_PATH="$VAR_DIR/rrd"
  1038. local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
  1039. mkdir -p "${RRD_FILE%/*}"
  1040. if [ -f "$RRD_FILE" ]; then
  1041. err "File '$RRD_FILE' already exists, use a different label."
  1042. return 1
  1043. fi
  1044. local rrd_ds_opts=()
  1045. for src_def in "$@"; do
  1046. IFS=":" read -r name type min max rra_types <<<"$src_def"
  1047. rra_types=${rra_types:-average,max,min}
  1048. rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
  1049. done
  1050. local step=120
  1051. local times=( ## with a step of 120s, each datapoint covers 2mn
  1052. 2m:1w
  1053. 6m:3w
  1054. 30m:12w
  1055. 3h:1y
  1056. 1d:10y
  1057. 1w:2080w
  1058. )
  1059. rrd_rra_opts=()
  1060. for time in "${times[@]}"; do
  1061. rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
  1062. done
  1063. cmd=(
  1064. rrdtool create "$RRD_FILE" \
  1065. --step "$step" \
  1066. "${rrd_ds_opts[@]}" \
  1067. "${rrd_rra_opts[@]}"
  1068. )
  1069. "${cmd[@]}" || {
  1070. err "Failed command: ${cmd[@]}"
  1071. return 1
  1072. }
  1073. }
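  ## Illustrative usage (hypothetical prefix/label): rrd:create "containers" "myc/odoo/mem" "usage:GAUGE:0:U"
  ## would create "$VAR_DIR/rrd/containers/myc/odoo/mem.rrd" with a single GAUGE data source.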
  1074. rrd:update() {
  1075. local prefix="$1"
  1076. shift
  1077. while read-0a data; do
  1078. [ -z "$data" ] && continue
  1079. IFS="~" read -ra data <<<"${data// /\~}"
  1080. label="${data[0]}"
  1081. ts="${data[1]}"
  1082. for arg in "$@"; do
  1083. IFS="|" read -r name arg <<<"$arg"
  1084. rrd_label="${label}/${name}"
  1085. rrd_create_opt=()
  1086. rrd_update_opt="$ts"
  1087. for col_def in ${arg//,/ }; do
  1088. col=${col_def%%:*}; create_def=${col_def#*:}
  1089. rrd_update_opt="${rrd_update_opt}:${data[$col]}"
  1090. rrd_create_opt+=("$create_def")
  1091. done
  1092. local RRD_ROOT_PATH="$VAR_DIR/rrd"
  1093. local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
  1094. local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
  1095. if ! [ -f "$RRD_FILE" ]; then
  1096. info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
  1097. if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
  1098. err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
  1099. return 1
  1100. fi
  1101. fi
  1102. rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
  1103. err "update failed with options: '$rrd_update_opt'"
  1104. return 1
  1105. }
  1106. done
  1107. done
  1108. }
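  ## Illustrative usage (a sketch; column indexes refer to docker:containers:stats output):
  ##   docker:containers:stats $(docker:running_containers) |
  ##     rrd:update "containers" "mem|2:usage:GAUGE:0:U,3:inactive:GAUGE:0:U"
  ## reads "label ts v1 v2 ..." lines and feeds (creating on first use) one RRD per
  ## "NAME|COL:DS:TYPE:MIN:MAX[,...]" specification.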
  1109. [ "$SOURCED" ] && return 0
  1110. ##
  1111. ## Command line processing
  1112. ##
  1113. cmdline.spec.gnu
  1114. cmdline.spec.reporting
  1115. cmdline.spec.gnu install
  1116. cmdline.spec::cmd:install:run() {
  1117. :
  1118. }
  1119. cmdline.spec.gnu get-type
  1120. cmdline.spec::cmd:get-type:run() {
  1121. vps:get-type
  1122. }
  1123. cmdline.spec:install:cmd:backup:run() {
  1124. : :posarg: BACKUP_SERVER 'Target backup server'
  1125. : :optfla: --ignore-domain-check \
  1126. "Allow bypassing the domain check in
  1127. the compose file (only used in compose
  1128. installations)."
  1129. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1130. local vps_type
  1131. vps_type=$(vps:get-type) || {
  1132. err "Failed to get type of installation."
  1133. return 1
  1134. }
  1135. if ! fn.exists "${vps_type}:install-backup"; then
  1136. err "type '${vps_type}' has no backup installation implemented yet."
  1137. return 1
  1138. fi
  1139. opts=()
  1140. [ "$opt_ignore_ping_check" ] &&
  1141. opts+=("--ignore-ping-check")
  1142. if [ "$vps_type" == "compose" ]; then
  1143. [ "$opt_ignore_domain_check" ] &&
  1144. opts+=("--ignore-domain-check")
  1145. fi
  1146. "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
  1147. }
  1148. DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
  1149. cmdline.spec.gnu compose-backup
  1150. cmdline.spec:install:cmd:compose-backup:run() {
  1151. : :posarg: BACKUP_SERVER 'Target backup server'
  1152. : :optval: --service-name,-s "YAML service name in compose
  1153. file to check for existence of key.
  1154. Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
  1155. : :optval: --compose-file,-f "Compose file location. Defaults to
  1156. the value of '\$DEFAULT_COMPOSE_FILE'"
  1157. : :optfla: --ignore-domain-check \
  1158. "Allow bypassing the domain check in
  1159. the compose file."
  1160. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1161. local service_name compose_file
  1162. [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
  1163. compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
  1164. service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
  1165. if ! [ -e "$compose_file" ]; then
  1166. err "Compose file not found in '$compose_file'."
  1167. return 1
  1168. fi
  1169. compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
  1170. "$opt_ignore_ping_check" "$opt_ignore_domain_check"
  1171. }
  1172. cmdline.spec:install:cmd:mailcow-backup:run() {
  1173. : :posarg: BACKUP_SERVER 'Target backup server'
  1174. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1175. "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
  1176. }
  1177. cmdline.spec.gnu backup
  1178. cmdline.spec::cmd:backup:run() {
  1179. local vps_type
  1180. vps_type=$(vps:get-type) || {
  1181. err "Failed to get type of installation."
  1182. return 1
  1183. }
  1184. if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
  1185. err "type '${vps_type}' has no backup process implemented yet."
  1186. return 1
  1187. fi
  1188. "cmdline.spec:backup:cmd:${vps_type}:run"
  1189. }
  1190. cmdline.spec:backup:cmd:mailcow:run() {
  1191. local cmd_line cron_line cmd
  1192. for f in mysql-backup mirror-dir; do
  1193. [ -e "/etc/cron.d/$f" ] || {
  1194. err "Can't find '/etc/cron.d/$f'."
  1195. echo " Have you forgotten to run 'vps install backup BACKUP_HOST'?" >&2
  1196. return 1
  1197. }
  1198. if ! cron_line=$(cat "/etc/cron.d/$f" |
  1199. grep -v "^#" | grep "\* \* \*"); then
  1200. err "Can't find cron_line in '/etc/cron.d/$f'." \
  1201. "Have you modified it?"
  1202. return 1
  1203. fi
  1204. cron_line=${cron_line%|*}
  1205. cmd_line=(${cron_line#*root})
  1206. if [ "$f" == "mirror-dir" ]; then
  1207. cmd=()
  1208. for arg in "${cmd_line[@]}"; do
  1209. [ "$arg" != "-q" ] && cmd+=("$arg")
  1210. done
  1211. else
  1212. cmd=("${cmd_line[@]}")
  1213. fi
  1214. code="${cmd[*]}"
  1215. echo "${WHITE}Launching:${NORMAL} ${code}"
  1216. {
  1217. {
  1218. (
  1219. ## Some commands use colors that are already
  1220. ## set by this program and would trickle
  1221. ## down unwantedly
  1222. ansi_color no
  1223. eval "${code}"
  1224. ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1225. set_errlvl "${PIPESTATUS[0]}"
  1226. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1227. set_errlvl "${PIPESTATUS[0]}"
  1228. } 3>&1 1>&2 2>&3
  1229. if [ "$?" != "0" ]; then
  1230. err "Failed."
  1231. return 1
  1232. fi
  1233. done
  1234. info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1235. }
  1236. set_errlvl() { return "${1:-1}"; }
  1237. cmdline.spec:backup:cmd:compose:run() {
  1238. local cron_line args
  1239. project_name=$(compose:project_name) || return 1
  1240. docker_cmd=$(compose:get_cron_docker_cmd) || return 1
  1241. echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
  1242. {
  1243. {
  1244. eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1245. set_errlvl "${PIPESTATUS[0]}"
  1246. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1247. set_errlvl "${PIPESTATUS[0]}"
  1248. } 3>&1 1>&2 2>&3
  1249. if [ "$?" != "0" ]; then
  1250. err "Failed."
  1251. return 1
  1252. fi
  1253. info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1254. }
  1255. cmdline.spec.gnu recover-target
  1256. cmdline.spec::cmd:recover-target:run() {
  1257. : :posarg: BACKUP_DIR 'Source directory on backup side'
  1258. : :posarg: HOST_DIR 'Target directory on host side'
  1259. : :optval: --backup-host,-B "The backup host"
  1260. : :optfla: --dry-run,-n "Don't do anything, instead tell what it
  1261. would do."
  1262. ## if no backup host take the one by default
  1263. backup_host="$opt_backup_host"
  1264. if [ -z "$backup_host" ]; then
  1265. backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
  1266. read -r backup_host ident <<<"$backup_host_ident"
  1267. fi
  1268. if [[ "$BACKUP_DIR" == /* ]]; then
  1269. err "BACKUP_DIR must be a relative path from the root of your backup."
  1270. return 1
  1271. fi
  1272. REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
  1273. err "Can't find HOST_DIR '$HOST_DIR'."
  1274. return 1
  1275. }
  1276. export DRY_RUN="${opt_dry_run}"
  1277. backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
  1278. }
  1279. cmdline.spec.gnu odoo
  1280. cmdline.spec::cmd:odoo:run() {
  1281. :
  1282. }
  1283. cmdline.spec.gnu restart
  1284. cmdline.spec:odoo:cmd:restart:run() {
  1285. : :optval: --service,-s "The service (defaults to 'odoo')"
  1286. local out odoo_service
  1287. odoo_service="${opt_service:-odoo}"
  1288. project_name=$(compose:project_name) || return 1
  1289. if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
  1290. if [[ "$out" == *"no matching entries in passwd file"* ]]; then
  1291. warn "Caught docker bug. Restarting once more."
  1292. if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
  1293. err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
  1294. echo " output:" >&2
  1295. echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
  1296. exit 1
  1297. fi
  1298. else
  1299. err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
  1300. exit 1
  1301. fi
  1302. fi
  1303. info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
  1304. }
  1305. cmdline.spec.gnu restore
  1306. cmdline.spec:odoo:cmd:restore:run() {
  1307. : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
  1308. (can be a local file or an url)'
  1309. : :optval: --service,-s "The service (defaults to 'odoo')"
  1310. : :optval: --database,-D 'Target database (default if not specified)'
  1311. : :optfla: --neutralize,-n "Restore database in neutralized state."
  1312. : :optfla: --debug,-d "Display more information."
  1313. local out
  1314. odoo_service="${opt_service:-odoo}"
  1315. if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
  1316. [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
  1317. settmpdir ZIP_TMP_LOCATION
  1318. tmp_location="$ZIP_TMP_LOCATION/dump.zip"
  1319. curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
  1320. err "Couldn't get '$ZIP_DUMP_LOCATION'."
  1321. exit 1
  1322. }
  1323. if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
  1324. err "Download doesn't seem to be a zip file."
  1325. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1326. exit 1
  1327. fi
  1328. info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
  1329. echo " in '$tmp_location'." >&2
  1330. ZIP_DUMP_LOCATION="$tmp_location"
  1331. fi
  1332. [ -e "$ZIP_DUMP_LOCATION" ] || {
  1333. err "No file '$ZIP_DUMP_LOCATION' found." >&2
  1334. exit 1
  1335. }
  1336. opts_compose=()
  1337. [ -t 1 ] && opts_compose+=("--color")
  1338. [ "$opt_debug" ] && {
  1339. VERBOSE=1
  1340. opts_compose+=("--debug")
  1341. }
  1342. opts_load=()
  1343. [ "$opt_neutralize" ] && opts_load+=("--neutralize")
  1344. #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1345. msg_dbname=default
  1346. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1347. Wrap -vsd "drop $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}" -- \
  1348. compose --no-hooks "${opts_compose[@]}" drop "$odoo_service" $opt_database || {
  1349. err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}:"
  1350. [ -z "$opt_debug" ] && {
  1351. echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
  1352. }
  1353. exit 1
  1354. }
  1355. Wrap -vsd "restore $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}" -- \
  1356. compose --no-hooks "${opts_compose[@]}" \
  1357. load "$odoo_service" $opt_database "${opts_load[@]}" < "$ZIP_DUMP_LOCATION" || {
  1358. err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
  1359. [ -z "$opt_debug" ] && {
  1360. echo " Use \`\`--debug\`\` (or \`\`-d\`\`) to get more information." >&2
  1361. }
  1362. exit 1
  1363. }
  1364. ## Restart odoo, ensure there is no bugs lingering on it.
  1365. cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1366. }
  1367. cmdline.spec.gnu dump
  1368. cmdline.spec:odoo:cmd:dump:run() {
  1369. : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
  1370. : :optval: --database,-d 'Target database (default if not specified)'
  1371. : :optval: --service,-s "The service (defaults to 'odoo')"
  1372. odoo_service="${opt_service:-odoo}"
  1373. msg_dbname=default
  1374. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1375. compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
  1376. err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1377. exit 1
  1378. }
  1379. info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1380. }
  1381. cmdline.spec.gnu drop
  1382. cmdline.spec:odoo:cmd:drop:run() {
  1383. : :optval: --database,-d 'Target database (default if not specified)'
  1384. : :optval: --service,-s "The service (defaults to 'odoo')"
  1385. odoo_service="${opt_service:-odoo}"
  1386. msg_dbname=default
  1387. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1388. compose --no-hooks drop "$odoo_service" $opt_database || {
  1389. err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1390. exit 1
  1391. }
  1392. info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1393. }
  1394. cmdline.spec.gnu set-cyclos-url
  1395. cmdline.spec:odoo:cmd:set-cyclos-url:run() {
  1396. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1397. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1398. local URL
  1399. dbname=${opt_database:-odoo}
  1400. cyclos_service="${opt_service:-cyclos}"
  1401. project_name=$(compose:project_name) || exit 1
  1402. URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1403. Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
  1404. echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
  1405. compose:psql "$project_name" "$dbname" || {
  1406. err "Failed to set cyclos url value in '$dbname' database."
  1407. exit 1
  1408. }
  1409. EOF
  1410. }
  1411. cmdline.spec.gnu fix-sso
  1412. cmdline.spec:odoo:cmd:fix-sso:run() {
  1413. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1414. local public_user_id project_name dbname
  1415. dbname=${opt_database:-odoo}
  1416. project_name=$(compose:project_name) || exit 1
  1417. public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
  1418. Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
  1419. echo "UPDATE website SET user_id = $public_user_id;" |
  1420. compose:psql "$project_name" "$dbname" || {
  1421. err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
  1422. exit 1
  1423. }
  1424. EOF
  1425. }
  1426. cmdline.spec.gnu cyclos
  1427. cmdline.spec::cmd:cyclos:run() {
  1428. :
  1429. }
  1430. cmdline.spec:cyclos:cmd:dump:run() {
  1431. : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
  1432. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1433. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1434. cyclos_service="${opt_service:-cyclos}"
  1435. cyclos_database="${opt_database:-cyclos}"
  1436. project_name=$(compose:project_name) || exit 1
  1437. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1438. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1439. docker stop "$container_id" || exit 1
  1440. Wrap -d "Dump postgres database '${cyclos_database}'." -- \
  1441. postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
  1442. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1443. docker start "${container_id}" || exit 1
  1444. }
  1445. cmdline.spec.gnu restore
  1446. cmdline.spec:cyclos:cmd:restore:run() {
  1447. : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
  1448. (can be a local file or an url)'
  1449. : :optval: --service,-s "The service (defaults to 'cyclos')"
  1450. : :optval: --database,-d 'Target database (default if not specified)'
  1451. local out
  1452. cyclos_service="${opt_service:-cyclos}"
  1453. cyclos_database="${opt_database:-cyclos}"
  1454. project_name=$(compose:project_name) || exit 1
  1455. url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
  1456. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1457. if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
  1458. [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
  1459. settmpdir GZ_TMP_LOCATION
  1460. tmp_location="$GZ_TMP_LOCATION/dump.gz"
  1461. Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
  1462. ## Note that curl versions before 7.76.0 do not have
  1463. curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
  1464. > "$tmp_location" || {
  1465. echo "Error fetching resource. Is the url correct?" >&2
  1466. exit 1
  1467. }
  1468. if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
  1469. hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
  1470. err "Download doesn't seem to be a gzip file."
  1471. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1472. exit 1
  1473. fi
  1474. EOF
  1475. GZ_DUMP_LOCATION="$tmp_location"
  1476. fi
  1477. [ -e "$GZ_DUMP_LOCATION" ] || {
  1478. err "No file '$GZ_DUMP_LOCATION' found." >&2
  1479. exit 1
  1480. }
  1481. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1482. docker stop "$container_id" || exit 1
  1483. ## XXXvlab: making the assumption that the postgres username should
  1484. ## be the same as the cyclos service selected (which is the default,
  1485. ## but not always the case).
  1486. Wrap -d "restore postgres database '${cyclos_database}'." -- \
  1487. postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
  1488. ## ensure that the database is not locked
  1489. Wrap -d "check and remove database lock if any" -- \
  1490. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1491. Wrap -d "set root url to '$url'" -- \
  1492. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1493. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1494. docker start "${container_id}" || exit 1
  1495. }
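## Hedged usage sketch (the `vps` entry-point name is an assumption):
##
##     vps cyclos restore https://example.com/dumps/cyclos.gz
##     vps cyclos restore /root/cyclos.dump.gz -s cyclos -d cyclos
##
## Remote dumps are downloaded to a temporary directory and checked for
## the gzip magic number before the container is stopped and the
## database restored, unlocked and re-pointed to the current root URL.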
  1496. cmdline.spec.gnu set-root-url
  1497. cmdline.spec:cyclos:cmd:set-root-url:run() {
  1498. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1499. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
1500. local url
  1501. cyclos_database=${opt_database:-cyclos}
  1502. cyclos_service="${opt_service:-cyclos}"
  1503. project_name=$(compose:project_name) || exit 1
  1504. url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1505. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1506. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1507. docker stop "$container_id" || exit 1
  1508. Wrap -d "set root url to '$url'" -- \
  1509. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1510. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1511. docker start "${container_id}" || exit 1
  1512. }
  1513. cmdline.spec.gnu unlock
  1514. cmdline.spec:cyclos:cmd:unlock:run() {
  1515. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1516. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1517. local URL
  1518. cyclos_database=${opt_database:-cyclos}
  1519. cyclos_service="${opt_service:-cyclos}"
  1520. project_name=$(compose:project_name) || exit 1
  1521. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1522. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1523. docker stop "$container_id" || exit 1
  1524. Wrap -d "check and remove database lock if any" -- \
  1525. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1526. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1527. docker start "${container_id}" || exit 1
  1528. }
  1529. cmdline.spec.gnu rocketchat
  1530. cmdline.spec::cmd:rocketchat:run() {
  1531. :
  1532. }
  1533. cmdline.spec.gnu drop-indexes
  1534. cmdline.spec:rocketchat:cmd:drop-indexes:run() {
  1535. : :optval: --database,-d "Target database ('rocketchat' if not specified)"
  1536. : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"
  1537. local URL
  1538. rocketchat_database=${opt_database:-rocketchat}
  1539. rocketchat_service="${opt_service:-rocketchat}"
  1540. project_name=$(compose:project_name) || exit 1
  1541. container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1
  1542. Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1543. docker stop "$container_id" || exit 1
  1544. errlvl=0
  1545. Wrap -d "drop indexes" -- \
  1546. rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
  1547. errlvl=1
  1548. errmsg="Failed to drop indexes"
  1549. }
  1550. Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1551. docker start "${container_id}" || exit 1
  1552. if [ "$errlvl" != 0 ]; then
  1553. err "$errmsg"
  1554. fi
  1555. exit "$errlvl"
  1556. }
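## Hedged usage sketch (the `vps` entry-point name is an assumption):
##
##     vps rocketchat drop-indexes
##
## Dropping the MongoDB indexes is typically done when an upgrade
## complains about conflicting index definitions; Rocket.Chat should
## recreate the indexes it needs on the next start of the container.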
  1557. cmdline.spec.gnu nextcloud
  1558. cmdline.spec::cmd:nextcloud:run() {
  1559. :
  1560. }
  1561. cmdline.spec.gnu upgrade
  1562. cmdline.spec:nextcloud:cmd:upgrade:run() {
  1563. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  1564. : :optval: --service,-s "The nexcloud service name (defaults to 'nextcloud')"
  1565. local URL
  1566. nextcloud_service="${opt_service:-nextcloud}"
  1567. project_name=$(compose:project_name) || exit 1
  1568. containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1
  1569. container_stopped=()
  1570. if [ -n "$containers" ]; then
  1571. for container in $containers; do
  1572. Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
  1573. docker stop "$container" || {
  1574. err "Failed to stop container '$container'."
  1575. exit 1
  1576. }
  1577. container_stopped+=("$container")
  1578. done
  1579. fi
  1580. before_version=$(nextcloud:src:version) || exit 1
  1581. ## -q to remove the display of ``compose`` related information
  1582. ## like relation resolution.
  1583. ## --no-hint to remove the final hint about modifying your
  1584. ## ``compose.yml``.
  1585. compose -q upgrade "$nextcloud_service" --no-hint "$TARGET_VERSION"
  1586. errlvl="$?"
  1587. after_version=$(nextcloud:src:version)
  1588. if [ "$after_version" != "$before_version" ]; then
  1589. desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
  1590. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1591. Wrap -d "$desc" -- \
  1592. compose:file:value-change \
  1593. "${nextcloud_service}.docker-compose.image" \
  1594. "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
  1595. fi
  1596. if [ "$errlvl" == 0 ]; then
  1597. echo "${WHITE}Launching final compose${NORMAL}"
  1598. compose up || exit 1
  1599. fi
  1600. exit "$errlvl"
  1601. }
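## Hedged usage sketch (entry-point name and version number below are
## illustrative assumptions):
##
##     vps nextcloud upgrade            ## to the latest version known by 'compose'
##     vps nextcloud upgrade 27.0.2     ## to an explicit target version
##
## On a successful code upgrade the service image in ``compose.yml`` is
## pinned to ``docker.0k.io/nextcloud:<new-version>-myc`` before the
## final ``compose up``.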
  1602. cmdline.spec.gnu check-fix
  1603. cmdline.spec::cmd:check-fix:run() {
  1604. : :posarg: [SERVICES...] "Optional service to check"
  1605. : :optval: --check,-c "Specify a check or a list of checks separated by commas"
  1606. : :optfla: --silent,-s "Don't ouput anything if everything goes well"
  1607. local project_name service_name containers container check
  1608. all_checks=$(declare -F |
  1609. egrep '^declare -fx? container:health:check-fix:[^ ]+$' |
  1610. cut -f 4 -d ":")
  1611. checks=(${opt_check//,/ })
  1612. for check in "${checks[@]}"; do
  1613. fn.exists container:health:check-fix:$check || {
  1614. err "check '$check' not found."
  1615. return 1
  1616. }
  1617. done
  1618. if [ "${#checks[*]}" == 0 ]; then
  1619. checks=($all_checks)
  1620. fi
  1621. ## XXXvlab: could make it parallel
  1622. project_name=$(compose:project_name) || exit 1
  1623. containers=($(compose:project:containers "${project_name}")) || exit 1
  1624. found=
  1625. for container in "${containers[@]}"; do
  1626. service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
  1627. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1628. [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
  1629. fi
  1630. found=1
  1631. one_bad=
  1632. for check in "${checks[@]}"; do
  1633. if ! container:health:check-fix:"$check" "$container"; then
  1634. one_bad=1
  1635. fi
  1636. done
  1637. if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
  1638. Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
  1639. Feedback
  1640. fi
  1641. done
  1642. if [ -z "$found" ]; then
  1643. if [ -z "$opt_silent" ]; then
  1644. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1645. warn "No container for given services found in current project '$project_name'."
  1646. else
  1647. warn "No container found for current project '$project_name'."
  1648. fi
  1649. fi
  1650. return 1
  1651. fi
  1652. }
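## Hedged usage sketch (entry-point and check/service names are illustrative):
##
##     vps check-fix                              ## all checks on every container
##     vps check-fix --silent odoo                ## only containers of the 'odoo' service
##     vps check-fix -c some-check,other-check    ## hypothetical check names
##
## Available check names are discovered from functions called
## ``container:health:check-fix:<name>``, so the set depends on the
## shell libraries loaded at runtime.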
  1653. awk:require() {
  1654. local require_at_least="$1" version already_installed
  1655. while true; do
  1656. if ! version=$(awk --version 2>/dev/null); then
  1657. version=""
  1658. else
  1659. version=${version%%,*}
  1660. version=${version##* }
  1661. fi
  1662. if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
  1663. if [ -z "$already_installed" ]; then
  1664. if [ -z "$version" ]; then
  1665. info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
  1666. else
  1667. info "Found gawk version '$version'. Updating 'gawk'..."
  1668. fi
  1669. apt-get install gawk -y </dev/null || {
  1670. err "Failed to install 'gawk'."
  1671. return 1
  1672. }
  1673. already_installed=true
  1674. else
  1675. if [ -z "$version" ]; then
  1676. err "No 'gawk' available even after having installed one"
  1677. else
  1678. err "'gawk' version '$version' is lower than required" \
  1679. "'$require_at_least' even after updating 'gawk'."
  1680. fi
  1681. return 1
  1682. fi
  1683. continue
  1684. fi
  1685. return 0
  1686. done
  1687. }
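## Comment-only example: this is how the pretty formatters below use it;
## it is a no-op when GNU awk >= 4.1.4 is already installed, otherwise a
## single ``apt-get install gawk`` is attempted before giving up:
##
##     awk:require 4.1.4 || return 1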
  1688. cmdline.spec.gnu stats
  1689. cmdline.spec::cmd:stats:run() {
1690. : :optval: --format,-f "Either 'silent', 'raw' or 'pretty' (default: 'pretty')."
  1691. : :optfla: --silent,-s "Shorthand for '--format silent'"
  1692. : :optval: --resource,-r 'resource(s) separated with a comma'
  1693. local project_name service_name containers container check
  1694. if [[ -n "${opt_silent}" ]]; then
  1695. if [[ -n "${opt_format}" ]]; then
  1696. err "'--silent' conflict with option '--format'."
  1697. return 1
  1698. fi
  1699. opt_format=s
  1700. fi
  1701. opt_format="${opt_format:-pretty}"
  1702. case "${opt_format}" in
  1703. raw|r)
  1704. opt_format="raw"
  1705. :
  1706. ;;
  1707. silent|s)
  1708. opt_format="silent"
  1709. ;;
  1710. pretty|p)
  1711. opt_format="pretty"
  1712. awk:require 4.1.4 || return 1
  1713. ;;
  1714. *)
  1715. err "Invalid value '$opt_format' for option --format"
  1716. echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
  1717. return 1
  1718. esac
  1719. local resources=(c.{memory,network} load_avg)
  1720. if [ -n "${opt_resource}" ]; then
  1721. resources=(${opt_resource//,/ })
  1722. fi
  1723. local not_found=()
  1724. for resource in "${resources[@]}"; do
  1725. if ! fn.exists "stats:$resource"; then
  1726. not_found+=("$resource")
  1727. fi
  1728. done
  1729. if [[ "${#not_found[@]}" -gt 0 ]]; then
  1730. not_found_msg=$(printf "%s, " "${not_found[@]}")
  1731. not_found_msg=${not_found_msg%, }
  1732. err "Unsupported resource(s) provided: ${not_found_msg}"
  1733. echo " resource must be one-of:" >&2
  1734. declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
  1735. return 1
  1736. fi
  1737. :state-dir:
  1738. for resource in "${resources[@]}"; do
  1739. if [ "$opt_format" == "pretty" ]; then
  1740. echo "${WHITE}$resource${NORMAL}:"
  1741. stats:"$resource" "$opt_format" 2>&1 | prefix " "
  1742. else
  1743. stats:"$resource" "$opt_format" 2>&1 | prefix "$resource "
  1744. fi
  1745. set_errlvl "${PIPESTATUS[0]}" || return 1
  1746. done
  1747. }
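## Hedged usage sketch (the `vps` entry-point name is an assumption):
##
##     vps stats                                ## pretty output, default resources
##     vps stats -r c.memory,load_avg -f raw
##     vps stats --silent                       ## collect into the RRD files only
##
## Note that 'silent' still runs every collector (and thus feeds
## ``rrd:update``); only the terminal reporting is skipped.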
  1748. stats:c.memory() {
  1749. local format="$1"
  1750. local out
  1751. container_to_check=($(docker:running_containers)) || exit 1
  1752. out=$(docker:containers:stats "${container_to_check[@]}")
  1753. printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
  1754. return 1
  1755. }
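## The spec string above seems to follow the pattern
## ``<rrd-name>|<column>:<ds-name>:<DST>:<min>:<max>[,...]`` (inferred
## from the call sites in this file): column 3 of the stats output feeds
## a GAUGE data source named 'usage', column 4 one named 'inactive',
## both with unbounded min/max ('U').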
  1756. case "${format:-p}" in
  1757. raw|r)
  1758. printf "%s\n" "$out" | cut -f 1-5 -d " "
  1759. ;;
  1760. pretty|p)
  1761. awk:require 4.1.4 || return 1
  1762. {
  1763. echo "container" "__total____" "buffered____" "resident____"
  1764. printf "%s\n" "$out" |
  1765. awk '
  1766. {
  1767. offset = strftime("%z", $2);
  1768. print $1, substr($0, index($0,$3));
  1769. }' | cut -f 1-4 -d " " |
  1770. numfmt --field 2-4 --to=iec-i --format=%8.1fB |
  1771. sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
  1772. sort
  1773. } | col:normalize:size -+++ |
  1774. sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
  1775. header:make
  1776. ;;
  1777. esac
  1778. }
  1779. stats:c.network() {
  1780. local format="$1"
  1781. local out
  1782. container_to_check=($(docker:running_containers)) || exit 1
  1783. out=$(docker:containers:stats "${container_to_check[@]}")
  1784. cols=(
  1785. {rx,tx}_{bytes,packets,errors,dropped}
  1786. )
  1787. idx=5 ## starting column idx for next fields
  1788. defs=()
  1789. for col in "${cols[@]}"; do
  1790. defs+=("$((idx++)):${col}:COUNTER:U:U")
  1791. done
  1792. OLDIFS="$IFS"
  1793. IFS="," defs="${defs[*]}"
  1794. IFS="$OLDIFS"
  1795. printf "%s\n" "$out" |
  1796. rrd:update "containers" \
  1797. "network|${defs}" || {
  1798. return 1
  1799. }
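## The rx/tx byte, packet, error and drop columns are cumulative
## counters, hence the COUNTER data-source type: assuming ``rrd:update``
## maps these specs onto RRDtool data sources, what gets stored is a
## per-second rate rather than the raw counter value.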
  1800. case "${format:-p}" in
  1801. raw|r)
  1802. printf "%s\n" "$out" | cut -f 1,2,7- -d " "
  1803. ;;
  1804. pretty|p)
  1805. awk:require 4.1.4 || return 1
  1806. {
  1807. echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
  1808. echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
  1809. printf "%s\n" "$out" |
  1810. awk '
  1811. {
  1812. offset = strftime("%z", $2);
  1813. print $1, substr($0, index($0,$7));
  1814. }' |
  1815. numfmt --field 2,6 --to=iec-i --format=%8.1fB |
  1816. numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
  1817. sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
  1818. sort
  1819. } | col:normalize:size -++++++++ |
  1820. sed -r '
  1821. s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
  1822. s/(\.[0-9]):([KMGTPE])/\1 \2/g;
  1823. s/ ([0-9]+)\.0:B/\1 /g;
  1824. s/ ([0-9]+)\.0:/\1 /g;
  1825. ' |
  1826. header:make 2
  1827. ;;
  1828. esac
  1829. }
  1830. header:make() {
  1831. local nb_line="${1:-1}"
  1832. local line
  1833. while ((nb_line-- > 0)); do
  1834. read-0a line
  1835. echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
  1836. done
  1837. cat
  1838. }
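## Example: ``header:make 2`` reads the first two lines of stdin, prints
## them in gray with every '_' replaced by a space (underscores are used
## above to reserve column width), then passes the remaining lines
## through unchanged.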
  1839. stats:load_avg() {
  1840. local format="$1"
  1841. local out
  1842. out=$(host:sys:load_avg)
  1843. printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
  1844. return 1
  1845. }
  1846. case "${format:-p}" in
  1847. raw|r)
  1848. printf "%s\n" "$out" | cut -f 2-5 -d " "
  1849. ;;
  1850. pretty|p)
  1851. {
  1852. echo "___1m" "___5m" "__15m"
  1853. printf "%s\n" "$out" | cut -f 3-5 -d " "
  1854. } | col:normalize:size +++ | header:make
  1855. ;;
  1856. esac
  1857. }
  1858. host:sys:load_avg() {
  1859. local uptime
  1860. uptime="$(uptime)"
  1861. uptime=${uptime##*: }
  1862. uptime=${uptime//,/}
  1863. printf "%s " "" "$(date +%s)" "$uptime"
  1864. }
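## Example output (values are illustrative): an empty leading field, the
## epoch timestamp, then the 1/5/15 minute load averages taken from
## ``uptime``:
##
##     " 1714060800 0.42 0.37 0.30 "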
  1865. cmdline.spec.gnu mongo
  1866. cmdline.spec::cmd:mongo:run() {
  1867. :
  1868. }
  1869. cmdline.spec.gnu upgrade
  1870. cmdline.spec:mongo:cmd:upgrade:run() {
  1871. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  1872. : :optval: --service,-s "The mongo service name (defaults to 'mongo')"
  1873. : :optfla: --debug,-d "Display debugging information"
  1874. local URL
  1875. mongo_service="${opt_service:-mongo}"
  1876. available_actions=$(compose --get-available-actions) || exit 1
  1877. available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
  1878. if [[ " ${available_actionable_services[*]} " != *" $mongo_service "* ]]; then
  1879. err "Service '$mongo_service' was not found in current 'compose.yml'."
  1880. exit 1
  1881. fi
  1882. opts_compose=()
  1883. if [ -n "$opt_debug" ]; then
  1884. opts_compose+=("--debug")
  1885. else
  1886. opts_compose+=("-q")
  1887. fi
  1888. project_name=$(compose:project_name) || exit 1
  1889. containers="$(compose:service:containers "${project_name}" "${mongo_service}")" || exit 1
  1890. ## XXXvlab: quick hack, to make more beautiful later
  1891. cron_container=$(compose:service:containers "${project_name}" "cron")
  1892. containers="$containers $cron_container"
  1893. docker stop "$cron_container" >/dev/null 2>&1 || true
  1894. before_version=
  1895. uptodate=
  1896. upgraded=
  1897. msgerr=()
  1898. while read-0a-err errlvl line; do
  1899. echo "$line"
  1900. rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
  1901. case "$rline" in
  1902. "II Current mongo version: "*)
  1903. before_version="${rline#II Current mongo version: }"
  1904. ;;
  1905. "II ${mongo_service} is already up-to-date.")
  1906. if [ -z "$before_version" ]; then
  1907. msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
  1908. continue
  1909. fi
  1910. after_version="$before_version"
  1911. uptodate=1
  1912. ;;
  1913. "II Successfully upgraded from ${before_version} to "*)
  1914. after_version="${rline#II Successfully upgraded from ${before_version} to }"
  1915. upgraded=1
  1916. ;;
  1917. *)
  1918. :
  1919. ;;
  1920. esac
  1921. done < <(
  1922. ## -q to remove the display of ``compose`` related information
  1923. ## like relation resolution.
  1924. ## -c on the upgrade action to force color
  1925. ansi_color=yes p-0a-err compose -c "${opts_compose[@]}" upgrade "$mongo_service" --no-hint -c "$TARGET_VERSION"
  1926. )
  1927. if [ "$errlvl" != 0 ]; then
  1928. exit "$errlvl"
  1929. fi
  1930. if [ -n "$uptodate" ]; then
  1931. for container in "${containers[@]}"; do
  1932. [ -n "$container" ] || continue
  1933. Wrap -d "start ${DARKYELLOW}${mongo_service}${NORMAL}'s container" -- \
  1934. docker start "$container" || {
  1935. err "Failed to start container '$container'."
  1936. exit 1
  1937. }
  1938. done
  1939. exit 0
  1940. fi
  1941. if [ -z "$upgraded" ]; then
  1942. err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
  1943. exit 1
  1944. fi
  1945. desc="update \`compose.yml\` to set ${DARKYELLOW}$mongo_service${NORMAL}'s "
  1946. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1947. Wrap -d "$desc" -- \
  1948. compose:file:value-change \
  1949. "${mongo_service}.docker-compose.image" \
  1950. "docker.0k.io/mongo:${after_version}-myc" || exit 1
  1951. echo "${WHITE}Launching final compose${NORMAL}"
  1952. compose up || exit 1
  1953. }
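## Hedged usage sketch (entry-point name and version are illustrative):
##
##     vps mongo upgrade                ## to the default target of the 'upgrade' action
##     vps mongo upgrade --debug 4.4
##
## The version actually reached is parsed from the
## 'II Successfully upgraded from X to Y' line emitted by the compose
## 'upgrade' action and is then pinned into ``compose.yml``.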
  1954. cmdline.spec.gnu postgres
  1955. cmdline.spec::cmd:postgres:run() {
  1956. :
  1957. }
  1958. cmdline.spec.gnu upgrade
  1959. cmdline.spec:postgres:cmd:upgrade:run() {
  1960. : :posarg: [TARGET_VERSION] "Target version to migrate to"
  1961. : :optval: --service,-s "The postgre service name (defaults to 'postgres')"
  1962. : :optfla: --debug,-d "Display debugging information"
  1963. local URL
  1964. depends yq
  1965. postgres_service="${opt_service:-postgres}"
  1966. available_actions=$(compose --get-available-actions) || exit 1
  1967. available_actionable_services=($(e "$available_actions" | yq 'keys().[]'))
  1968. if [[ " ${available_actionable_services[*]} " != *" $postgres_service "* ]]; then
  1969. err "Service '$postgres_service' was not found in current 'compose.yml'."
  1970. exit 1
  1971. fi
  1972. opts_compose=()
  1973. if [ -n "$opt_debug" ]; then
  1974. opts_compose+=("--debug")
  1975. else
  1976. opts_compose+=("-q")
  1977. fi
  1978. project_name=$(compose:project_name) || exit 1
  1979. containers=($(compose:service:containers "${project_name}" "${postgres_service}")) || exit 1
  1980. ## XXXvlab: quick hack, to make more beautiful later
  1981. cron_container=$(compose:service:containers "${project_name}" "cron")
  1982. containers+=("$cron_container")
  1983. docker stop "$cron_container" >/dev/null 2>&1 || true
  1984. before_version=
  1985. uptodate=
  1986. upgraded=
  1987. msgerr=()
  1988. while read-0a-err errlvl line; do
  1989. echo "$line"
  1990. rline=$(printf "%s" "$line" | sed_compat "s/$__color_sequence_regex//g")
  1991. case "$rline" in
  1992. "II Current postgres version: "*)
  1993. before_version="${rline#II Current postgres version: }"
  1994. ;;
  1995. "II ${postgres_service} is already up-to-date.")
  1996. if [ -z "$before_version" ]; then
  1997. msgerr+=("expected a 'current version' line before the 'up-to-date' one.")
  1998. continue
  1999. fi
  2000. after_version="$before_version"
  2001. uptodate=1
  2002. ;;
  2003. "II Successfully upgraded from ${before_version} to "*)
  2004. after_version="${rline#II Successfully upgraded from ${before_version} to }"
  2005. upgraded=1
  2006. ;;
  2007. *)
  2008. :
  2009. ;;
  2010. esac
  2011. done < <(
  2012. ## -q to remove the display of ``compose`` related information
  2013. ## like relation resolution.
  2014. ## -c on the upgrade action to force color
  2015. ansi_color=yes p-0a-err compose -q -c "${opts_compose[@]}" upgrade "$postgres_service" --no-hint -c "$TARGET_VERSION" 2>&1
  2016. )
  2017. if [ "$errlvl" != 0 ]; then
  2018. exit "$errlvl"
  2019. fi
  2020. if [ -n "$uptodate" ]; then
  2021. for container in "${containers[@]}"; do
  2022. [ -n "$container" ] || continue
  2023. Wrap -d "start ${DARKYELLOW}${postgres_service}${NORMAL}'s container" -- \
  2024. docker start "$container" || {
  2025. err "Failed to start container '$container'."
  2026. exit 1
  2027. }
  2028. done
  2029. exit 0
  2030. fi
  2031. if [ -z "$upgraded" ]; then
  2032. err "Unexpected output of 'upgrade' action with errorlevel 0 and without success"
  2033. exit 1
  2034. fi
  2035. desc="update \`compose.yml\` to set ${DARKYELLOW}$postgres_service${NORMAL}'s "
  2036. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  2037. Wrap -d "$desc" -- \
  2038. compose:file:value-change \
  2039. "${postgres_service}.docker-compose.image" \
  2040. "docker.0k.io/postgres:${after_version}-myc" || exit 1
  2041. echo "${WHITE}Launching final compose${NORMAL}"
  2042. compose up || exit 1
  2043. }
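## Same pattern as the mongo upgrade above: the 'cron' container is
## stopped up-front (quick hack noted above), the compose 'upgrade'
## action output is parsed for the before/after versions, and on an
## actual upgrade the image tag in ``compose.yml`` is pinned before the
## final ``compose up`` restarts everything.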
  2044. cmdline.spec.gnu bench
  2045. cmdline.spec::cmd:bench:run() {
  2046. depends sysbench
  2047. nbthread=$(lscpu | egrep "^CPU\(s\):" | cut -f 2 -d : | xargs echo)
  2048. single=$(sysbench cpu --cpu-max-prime=20000 run --threads=1 | grep "events per" | cut -f 2 -d : | xargs echo)
  2049. threaded=$(sysbench cpu --cpu-max-prime=20000 run --threads="$nbthread" | grep "events per" | cut -f 2 -d : | xargs echo)
  2050. echo "$threaded / $single / $nbthread"
  2051. }
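## The printed line reads ``<multi-thread events/s> / <single-thread
## events/s> / <nb CPU threads>``; comparing the first two figures gives
## a rough idea of how well the host scales over its CPUs.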
  2052. cmdline::parse "$@"