  1. #!/bin/bash
  2. . /etc/shlib
  3. include common
  4. include parse
  5. include cmdline
  6. include config
  7. include cache
  8. include fn
  9. include docker
  10. [[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
  11. version=0.1
  12. desc='Install backup'
  13. help=""
  14. version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
  15. docker:running-container-projects() {
  16. :cache: scope=session
  17. docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
  18. }
  19. decorator._mangle_fn docker:running-container-projects
  20. ssh:mk-private-key() {
  21. local host="$1" service_name="$2"
  22. (
  23. settmpdir VPS_TMPDIR
  24. ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
  25. cat "$VPS_TMPDIR/rsync_rsa"
  26. )
  27. }
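## Illustrative usage (hypothetical host/service names): generate a throwaway
## RSA key, printed on stdout with the label "rsync-backup@vps.example.com":
##   key=$(ssh:mk-private-key "vps.example.com" "rsync-backup")
##   ssh-keygen -y -f <(e "$key"$'\n')   ## derive the matching public key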
  28. mailcow:has-images-running() {
  29. local images
  30. images=$(docker ps --format '{{.Image}}' | sort | uniq)
  31. [[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
  32. }
  33. mailcow:has-container-project-mentionning-mailcow() {
  34. local projects
  35. projects=$(docker:running-container-projects) || return 1
  36. [[ $'\n'"$projects"$'\n' == *mailcow* ]]
  37. }
  38. mailcow:has-running-containers() {
  39. mailcow:has-images-running ||
  40. mailcow:has-container-project-mentionning-mailcow
  41. }
  42. mailcow:get-root() {
  43. :cache: scope=session
  44. local dir
  45. for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
  46. [ -d "$dir" ] || continue
  47. [ -r "$dir/mailcow.conf" ] || continue
  48. echo "$dir"
  49. return 0
  50. done
  51. return 1
  52. }
  53. decorator._mangle_fn mailcow:get-root
  54. compose:get-compose-yml() {
  55. :cache: scope=session
  56. local path
  57. path=$(DEBUG=1 DRY_RUN=1 compose 2>&1 | egrep '^\s+-e HOST_COMPOSE_YML_FILE=' | cut -f 2- -d "=" | cut -f 1 -d " ")
  58. [ -e "$path" ] || return 1
  59. echo "$path"
  60. }
  61. decorator._mangle_fn compose:get-compose-yml
  62. export -f compose:get-compose-yml
  63. compose:has-container-project-myc() {
  64. local projects
  65. projects=$(docker:running-container-projects) || return 1
  66. [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
  67. }
  68. compose:file:value-change() {
  69. local key="$1" value="$2"
  70. local compose_yml
  71. if ! compose_yml=$(compose:get-compose-yml); then
  72. err "Couldn't locate your 'compose.yml' file."
  73. return 1
  74. fi
  75. yaml:file:value-change "$compose_yml" "$key" "$value" || return 1
  76. }
  77. export -f compose:file:value-change
  78. yaml:file:value-change() {
  79. local file="$1" key="$2" value="$3" first=1 count=0 diff=""
  80. (
  81. cd "${file%/*}"
  82. while read-0 hunk; do
  83. if [ -n "$first" ]; then
  84. diff+="$hunk"
  85. first=
  86. continue
  87. fi
  88. if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
  89. ((count++))
  90. diff+="$hunk" >&2
  91. else
  92. :
  93. # echo "discarding:" >&2
  94. # e "$hunk" | prefix " | " >&2
  95. fi
  96. done < <(
  97. export DEBUG=
  98. settmpdir YQ_TEMP
  99. cp "${file}" "$YQ_TEMP/compose.yml" &&
  100. yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
  101. sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
  102. diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
  103. sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
  104. printf "\0"
  105. )
  106. if [[ "$count" == 0 ]]; then
  107. err "No change made to '$file'."
  108. return 1
  109. fi
  110. if [[ "$count" != 1 ]]; then
  111. err "compose file change request seems dubious and was refused:"
  112. e "$diff" | prefix " | " >&2
  113. return 1
  114. fi
  115. echo Applying: >&2
  116. e "$diff" | prefix " | " >&2
  117. patch <<<"$diff"
  118. ) || exit 1
  119. }
  120. export -f yaml:file:value-change
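## Illustrative example (hypothetical file path): change a single key in a
## compose-style YAML file, refusing ambiguous edits; the key/value mirror the
## nextcloud upgrade usage further below:
##   yaml:file:value-change "/srv/compose.yml" \
##       "nextcloud.docker-compose.image" "docker.0k.io/nextcloud:27.0.1-myc"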
  121. type:is-mailcow() {
  122. mailcow:get-root >/dev/null ||
  123. mailcow:has-running-containers
  124. }
  125. type:is-compose() {
  126. compose:get-compose-yml >/dev/null &&
  127. compose:has-container-project-myc
  128. }
  129. vps:get-type() {
  130. :cache: scope=session
  131. local fn
  132. for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
  133. "$fn" && {
  134. echo "${fn#type:is-}"
  135. return 0
  136. }
  137. done
  138. return 1
  139. }
  140. decorator._mangle_fn vps:get-type
  141. mirror-dir:sources() {
  142. :cache: scope=session
  143. if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
  144. err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
  145. return 1
  146. fi
  147. }
  148. decorator._mangle_fn mirror-dir:sources
  149. mirror-dir:check-add() {
  150. local elt="$1" sources
  151. sources=$(mirror-dir:sources) || return 1
  152. if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
  153. info "Volume $elt already in sources"
  154. else
  155. Elt "Adding directory $elt"
  156. sed -i "/sources:/a\ - \"${elt}\"" \
  157. /etc/mirror-dir/config.yml
  158. Feedback || return 1
  159. fi
  160. }
  161. mirror-dir:check-add-vol() {
  162. local elt="$1"
  163. mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
  164. }
  165. ## The first colon is to prevent auto-export of function from shlib
  166. : ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
  167. export BASH_BUG_5=1 && unset -f bash-bug-5
  168. wrap() {
  169. local label="$1" code="$2"
  170. shift 2
  171. export VERBOSE=1
  172. interpreter=/bin/bash
  173. if [ -n "$BASH_BUG_5" ]; then
  174. (
  175. settmpdir tmpdir
  176. fname=${label##*/}
  177. e "$code" > "$tmpdir/$fname" &&
  178. chmod +x "$tmpdir/$fname" &&
  179. Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
  180. )
  181. else
  182. Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
  183. fi
  184. }
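## Illustrative usage (hypothetical label, code and argument): run an inline
## snippet under a Wrap label, using the temp-file fallback when the bash bug
## was detected above:
##   wrap "host/list-tmp" 'ls "$1"' /tmp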
  185. ping_check() {
  186. #global ignore_ping_check
  187. local host="$1"
  188. ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
  189. head -n 1 | cut -f 1 -d " ") || return 1
  190. my_ip=$(curl -s myip.kal.fr)
  191. if [ "$ip" != "$my_ip" ]; then
  192. if [ -n "$ignore_ping_check" ]; then
  193. warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
  194. else
  195. err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
  196. return 1
  197. fi
  198. fi
  199. }
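## Illustrative check (hypothetical domain): succeeds only when the first IPv4
## of the domain matches this host's public IP as reported by myip.kal.fr;
## setting ignore_ping_check downgrades a mismatch to a warning:
##   ping_check "mail.example.com" || echo "DNS does not point here"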
  200. mailcow:install-backup() {
  201. local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
  202. ## find installation
  203. mailcow_root=$(mailcow:get-root) || {
  204. err "Couldn't find a valid mailcow root directory."
  205. return 1
  206. }
  207. ## check ok
  208. DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
  209. err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
  210. return 1
  211. }
  212. ping_check "$DOMAIN" || return 1
  213. MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
  214. err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
  215. return 1
  216. }
  217. if docker compose >/dev/null 2>&1; then
  218. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
  219. else
  220. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
  221. fi
  222. container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
  223. if [ -z "$container_id" ]; then
  224. err "Couldn't find docker container named '$MYSQL_CONTAINER'."
  225. return 1
  226. fi
  227. export KEY_BACKUP_ID="mailcow"
  228. export MYSQL_ROOT_PASSWORD
  229. export MYSQL_CONTAINER
  230. export BACKUP_SERVER
  231. export DOMAIN
  232. wrap "Install rsync-backup on host" "
  233. cd /srv/charm-store/rsync-backup
  234. bash ./hooks/install.d/60-install.sh
  235. " || return 1
  236. wrap "Mysql dump install" "
  237. cd /srv/charm-store/mariadb
  238. bash ./hooks/install.d/60-backup.sh
  239. " || return 1
  240. ## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
  241. for elt in "vmail{,-attachments-vol}" crypt redis rspamd postfix; do
  242. mirror-dir:check-add-vol "$elt" || return 1
  243. done
  244. mirror-dir:check-add "$mailcow_root" || return 1
  245. mirror-dir:check-add "/var/backups/mysql" || return 1
  246. mirror-dir:check-add "/etc" || return 1
  247. dest="$BACKUP_SERVER"
  248. dest="${dest%/*}"
  249. ssh_options=()
  250. if [[ "$dest" == *":"* ]]; then
  251. port="${dest##*:}"
  252. dest="${dest%%:*}"
  253. ssh_options=(-p "$port")
  254. else
  255. port=""
  256. dest="${dest%%:*}"
  257. fi
  258. info "You can run the following command from a host having admin access to $dest:"
  259. echo " (Or send it to a backup admin of $dest)" >&2
  260. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
  261. }
  262. compose:has_domain() {
  263. local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
  264. while read-0 name conf ; do
  265. name=$(e "$name" | shyaml get-value)
  266. if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
  267. [ "$host" == "$name" ] && return 0
  268. fi
  269. rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
  270. for relation in web-proxy publish-dir; do
  271. relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
  272. while read-0 label conf_relation; do
  273. domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
  274. [ "$host" == "$domain" ] && return 0
  275. }
  276. server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
  277. [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
  278. }
  279. done < <(e "$relation_value" | shyaml -y key-values-0)
  280. done
  281. done < <(shyaml -y key-values-0 < "$compose_file")
  282. return 1
  283. }
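## Illustrative query (hypothetical path and host): does the compose file
## declare 'www.example.com' directly as a service name, or through a
## web-proxy / publish-dir relation ('domain' or 'server-aliases')?
##   compose:has_domain "/srv/compose.yml" "www.example.com"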
  284. compose:install-backup() {
  285. local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
  286. ## XXXvlab: far from perfect as it mimics and depends on the internal
  287. ## logic of the current default way to get a domain in compose-core
  288. host=$(hostname)
  289. if ! compose:has_domain "$compose_file" "$host"; then
  290. if [ -n "$ignore_domain_check" ]; then
  291. warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
  292. else
  293. err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
  294. return 1
  295. fi
  296. fi
  297. ping_check "$host" || return 1
  298. if [ -e "/root/.ssh/rsync_rsa" ]; then
  299. warn "deleting private key in /root/.ssh/rsync_rsa, as we are not using it anymore."
  300. rm -fv /root/.ssh/rsync_rsa
  301. fi
  302. if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
  303. warn "deleting public key in /root/.ssh/rsync_rsa.pub, as we are not using it anymore."
  304. rm -fv /root/.ssh/rsync_rsa.pub
  305. fi
  306. if service_cfg=$(cat "$compose_file" |
  307. shyaml get-value -y "$service_name" 2>/dev/null); then
  308. info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
  309. "is already present in '$compose_file'."
  310. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  311. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  312. "entry in '$compose_file'."
  313. return 1
  314. }
  315. private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
  316. target=$(e "$cfg" | shyaml get-value target) || return 1
  317. if [ "$target" != "$BACKUP_SERVER" ]; then
  318. err "Existing backup target '$target' is different" \
  319. "from specified '$BACKUP_SERVER'"
  320. return 1
  321. fi
  322. else
  323. private_key=$(ssh:mk-private-key "$host" "$service_name")
  324. cat <<EOF >> "$compose_file"
  325. $service_name:
  326. options:
  327. ident: $host
  328. target: $BACKUP_SERVER
  329. private-key: |
  330. $(e "$private_key" | sed -r 's/^/ /g')
  331. EOF
  332. fi
  333. dest="$BACKUP_SERVER"
  334. dest="${dest%/*}"
  335. ssh_options=()
  336. if [[ "$dest" == *":"* ]]; then
  337. port="${dest##*:}"
  338. dest="${dest%%:*}"
  339. ssh_options=(-p "$port")
  340. else
  341. port=""
  342. dest="${dest%%:*}"
  343. fi
  344. info "You can run the following command from a host having admin access to $dest:"
  345. echo " (Or send it to a backup admin of $dest)" >&2
  346. ## We remove the trailing label (a label may or may not be embedded
  347. ## in the private key, and thus appear here, depending on the version
  348. ## of openssh-client)
  349. public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
  350. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
  351. }
  352. backup-action() {
  353. local action="$1"
  354. shift
  355. vps_type=$(vps:get-type) || {
  356. err "Failed to get type of installation."
  357. return 1
  358. }
  359. if ! fn.exists "${vps_type}:${action}"; then
  360. err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
  361. return 1
  362. fi
  363. "${vps_type}:${action}" "$@"
  364. }
  365. compose:get_default_backup_host_ident() {
  366. local service_name="$1" ## Optional
  367. local compose_file service_cfg cfg target
  368. compose_file=$(compose:get-compose-yml)
  369. service_name="${service_name:-rsync-backup}"
  370. if ! service_cfg=$(cat "$compose_file" |
  371. shyaml get-value -y "$service_name" 2>/dev/null); then
  372. err "No service named '$service_name' found in 'compose.yml'."
  373. return 1
  374. fi
  375. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  376. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  377. "entry in '$compose_file'."
  378. return 1
  379. }
  380. if ! target=$(e "$cfg" | shyaml get-value target); then
  381. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  382. "entry in '$compose_file'."
  383. fi
  388. if ! ident=$(e "$cfg" | shyaml get-value ident); then
  389. err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  390. "entry in '$compose_file'."
  391. fi
  392. echo "$target $ident"
  393. }
  394. mailcow:get_default_backup_host_ident() {
  395. local content cron_line ident found dest cmd_line
  396. if ! [ -e "/etc/cron.d/mirror-dir" ]; then
  397. err "No '/etc/cron.d/mirror-dir' found."
  398. return 1
  399. fi
  400. content=$(cat /etc/cron.d/mirror-dir) || {
  401. err "Can't read '/etc/cron.d/mirror-dir'."
  402. return 1
  403. }
  404. if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
  405. err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
  406. return 1
  407. fi
  408. cron_line=${cron_line%|*}
  409. cmd_line=(${cron_line#*root})
  410. found=
  411. dest=
  412. for arg in "${cmd_line[@]}"; do
  413. [ -n "$found" ] && {
  414. dest="$arg"
  415. break
  416. }
  417. [ "$arg" == "-d" ] && {
  418. found=1
  419. }
  420. done
  421. if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
  422. err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  423. return 1
  424. fi
  425. if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
  426. ## unquoting, the eval should be safe because of previous check
  427. dest=$(eval e "$dest")
  428. fi
  429. if [ -z "$dest" ]; then
  430. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  431. return 1
  432. fi
  433. ## looking for ident
  434. found=
  435. ident=
  436. for arg in "${cmd_line[@]}"; do
  437. [ -n "$found" ] && {
  438. ident="$arg"
  439. break
  440. }
  441. [ "$arg" == "-h" ] && {
  442. found=1
  443. }
  444. done
  445. if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
  446. err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  447. return 1
  448. fi
  449. if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
  450. ## unquoting, the eval should be safe because of previous check
  451. ident=$(eval e "$ident")
  452. fi
  453. if [ -z "$ident" ]; then
  454. err "Can't find identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  455. return 1
  456. fi
  457. echo "$dest $ident"
  458. }
  459. compose:service:containers() {
  460. local project="$1" service="$2"
  461. docker ps \
  462. --filter label="com.docker.compose.project=$project" \
  463. --filter label="compose.master-service=$service" \
  464. --format="{{.ID}}"
  465. }
  466. export -f compose:service:containers
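## Illustrative usage (hypothetical project/service names): list the container
## IDs of one compose service within a project:
##   compose:service:containers "myc" "odoo" | head -n 1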
  467. compose:service:container_one() {
  468. local project="$1" service="$2" container_id
  469. {
  470. read-0a container_id || {
  471. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  472. return 1
  473. }
  474. if read-0a _; then
  475. err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  476. return 1
  477. fi
  478. } < <(compose:service:containers "$project" "$service")
  479. echo "$container_id"
  480. }
  481. export -f compose:service:container_one
  482. compose:service:container_first() {
  483. local project="$1" service="$2" container_id
  484. {
  485. read-0a container_id || {
  486. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  487. return 1
  488. }
  489. if read-0a _; then
  490. warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  491. fi
  492. } < <(compose:service:containers "$project" "$service")
  493. echo "$container_id"
  494. }
  495. export -f compose:service:container_first
  496. docker:running_containers() {
  497. :cache: scope=session
  498. docker ps --format="{{.ID}}"
  499. }
  500. decorator._mangle_fn docker:running_containers
  501. export -f docker:running_containers
  502. compose:project:containers() {
  503. local project="$1" opts
  504. opts+=(--filter label="com.docker.compose.project=$project")
  505. docker ps "${opts[@]}" \
  506. --format="{{.ID}}"
  507. }
  508. export -f compose:project:containers
  509. compose:charm:containers() {
  510. local project="$1" charm="$2"
  511. docker ps \
  512. --filter label="com.docker.compose.project=$project" \
  513. --filter label="compose.charm=$charm" \
  514. --format="{{.ID}}"
  515. }
  516. export -f compose:charm:containers
  517. compose:charm:container_one() {
  518. local project="$1" charm="$2" container_id
  519. {
  520. read-0a container_id || {
  521. err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
  522. return 1
  523. }
  524. if read-0a _; then
  525. err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
  526. return 1
  527. fi
  528. } < <(compose:charm:containers "$project" "$charm")
  529. echo "$container_id"
  530. }
  531. export -f compose:charm:container_one
  532. compose:charm:container_first() {
  533. local project="$1" charm="$2" container_id
  534. {
  535. read-0a container_id || {
  536. warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
  537. }
  538. if read-0a _; then
  539. warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
  540. fi
  541. } < <(compose:charm:containers "$project" "$charm")
  542. echo "$container_id"
  543. }
  544. export -f compose:charm:container_first
  545. compose:get_url() {
  546. local project_name="$1" service="$2" data_file network ip
  547. data_file="/var/lib/compose/relations/${project_name}/${service}-frontend/web-proxy/data"
  548. if [ -e "$data_file" ]; then
  549. (
  550. set -o pipefail
  551. cat "$data_file" | shyaml get-value url
  552. )
  553. else
  554. ## Assume there is no frontend relation here; the url is the direct IP
  555. container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
  556. network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
  557. IFS=":" read -r network ip <<<"$network_ip"
  558. tcp_port=
  559. for port in $(docker:exposed_ports "$container_id"); do
  560. IFS="/" read port type <<<"$port"
  561. [ "$type" == "tcp" ] || continue
  562. tcp_port="$port"
  563. break
  564. done
  565. echo -n "http://$ip"
  566. [ -n "$tcp_port" ] && echo ":$tcp_port"
  567. fi || {
  568. err "Failed querying ${service}'s frontend relation to get url."
  569. return 1
  570. }
  571. }
  572. export -f compose:get_url
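## Illustrative usage (hypothetical project/service names): resolve the URL of
## a service, either from its web-proxy relation data or from its container
## IP and first exposed TCP port:
##   url=$(compose:get_url "myc" "cyclos") || exit 1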
  573. compose:container:service() {
  574. local container="$1" service
  575. if ! service=$(docker:container:label "$container" "compose.service"); then
  576. err "Failed to get service name from container ${container}."
  577. return 1
  578. fi
  579. if [ -z "$service" ]; then
  580. err "No service found for container ${container}."
  581. return 1
  582. fi
  583. echo "$service"
  584. }
  585. export -f compose:container:service
  586. compose:psql() {
  587. local project_name="$1" dbname="$2" container_id
  588. shift 2
  589. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  590. docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
  591. }
  592. export -f compose:psql
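## Illustrative usage (hypothetical project/database names): run a query
## through the project's postgres container as the postgres user:
##   echo "SELECT count(*) FROM res_users;" | compose:psql "myc" "odoo" -qAt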
  593. compose:mongo() {
  594. local project_name="$1" dbname="$2" container_id
  595. container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
  596. docker exec -i "${container_id}" mongo --quiet "$dbname"
  597. }
  598. export -f compose:mongo
  599. compose:pgm() {
  600. local project_name="$1" container_network_ip container_ip container_network
  601. shift
  602. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  603. service_name=$(compose:container:service "$container_id") || return 1
  604. image_id=$(docker:container:image "$container_id") || return 1
  605. container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
  606. IFS=":" read -r container_network container_ip <<<"$container_network_ip"
  607. pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
  608. local final_pgm_docker_run_opts+=(
  609. -u 0 -e prefix_pg_local_command=" "
  610. --network "${container_network}"
  611. -e PGHOST="$container_ip"
  612. -e PGUSER=postgres
  613. -v "$pgpass:/root/.pgpass"
  614. "${pgm_docker_run_opts[@]}"
  615. )
  616. cmd=(docker run --rm \
  617. "${final_pgm_docker_run_opts[@]}" \
  618. "${image_id}" pgm "$@"
  619. )
  620. echo "${cmd[@]}"
  621. "${cmd[@]}"
  622. }
  623. export -f compose:pgm
  624. postgres:dump() {
  625. local project_name="$1" src="$2" dst="$3"
  626. (
  627. settmpdir PGM_TMP_LOCATION
  628. pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
  629. compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
  630. mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
  631. ) || return 1
  632. }
  633. export -f postgres:dump
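## Illustrative usage (hypothetical names and destination path): dump the
## 'cyclos' database of project 'myc' to a local gzip file via the pgm image:
##   postgres:dump "myc" "cyclos" "/var/backups/cyclos.dump.gz"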
  634. postgres:restore() {
  635. local project_name="$1" src="$2" dst="$3"
  636. full_src_path=$(readlink -e "$src") || exit 1
  637. (
  638. pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
  639. compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
  640. ) || return 1
  641. }
  642. export -f postgres:restore
  643. odoo:get_public_user_id() {
  644. local project_name="$1" dbname="$2"
  645. echo "select res_id from ir_model_data where model = 'res.users' and name = 'public_user';" |
  646. compose:psql "$project_name" "$dbname" -qAt
  647. }
  648. cyclos:set_root_url() {
  649. local project_name="$1" dbname="$2" url="$3"
  650. echo "UPDATE configurations SET root_url = '$url';" |
  651. compose:psql "$project_name" "$dbname" || {
  652. err "Failed to set cyclos url value in '$dbname' database."
  653. return 1
  654. }
  655. }
  656. export -f cyclos:set_root_url
  657. cyclos:unlock() {
  658. local project_name="$1" dbname="$2"
  659. echo "delete from database_lock;" |
  660. compose:psql "${project_name}" "${dbname}"
  661. }
  662. export -f cyclos:unlock
  663. rocketchat:drop-indexes() {
  664. local project_name="$1" dbname="$2"
  665. echo "db.users.dropIndexes()" |
  666. compose:mongo "${project_name}" "${dbname}"
  667. }
  668. export -f rocketchat:drop-indexes
  669. compose:project_name() {
  670. if [ -z "$PROJECT_NAME" ]; then
  671. PROJECT_NAME=$(compose --get-project-name) || {
  672. err "Couldn't get project name."
  673. return 1
  674. }
  675. if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
  676. err "Couldn't get project name, probably because 'compose.yml' wasn't found."
  677. echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
  678. echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
  679. return 1
  680. fi
  681. export PROJECT_NAME
  682. fi
  683. echo "$PROJECT_NAME"
  684. }
  685. export -f compose:project_name
  686. compose:get_cron_docker_cmd() {
  687. local cron_line cmd_line docker_cmd
  688. project_name=$(compose:project_name) || return 1
  689. if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
  690. err "Can't find cron_line in cron container."
  691. echo " Have you forgotten to run 'compose up' ?" >&2
  692. return 1
  693. fi
  694. cron_line=${cron_line%|*}
  695. cron_line=${cron_line%"2>&1"*}
  696. cmd_line="${cron_line#*root}"
  697. eval "args=($cmd_line)"
  698. ## should be last argument
  699. docker_cmd=$(echo ${args[@]: -1})
  700. if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
  701. echo "The docker command found should start with 'docker run'." >&2
  702. echo "Here's the command:" >&2
  703. echo " $docker_cmd" >&2
  704. return 1
  705. fi
  706. e "$docker_cmd"
  707. }
  708. compose:recover-target() {
  709. local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
  710. project_name=$(compose:project_name) || return 1
  711. docker_image="${project_name}_${service_name}"
  712. if ! docker_has_image "$docker_image"; then
  713. compose build "${service_name}" || {
  714. err "Couldn't find or build an image for service '$service_name'."
  715. return 1
  716. }
  717. fi
  718. dst="${dst%/}" ## remove final slash
  719. ssh_options=(-o StrictHostKeyChecking=no)
  720. if [[ "$backup_host" == *":"* ]]; then
  721. port="${backup_host##*:}"
  722. backup_host="${backup_host%%:*}"
  723. ssh_options+=(-p "$port")
  724. else
  725. port=""
  726. backup_host="${backup_host%%:*}"
  727. fi
  728. rsync_opts=(
  729. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  730. -azvArH --delete --delete-excluded
  731. --partial --partial-dir .rsync-partial
  732. --numeric-ids
  733. )
  734. if [ "$DRY_RUN" ]; then
  735. rsync_opts+=("-n")
  736. fi
  737. cmd=(
  738. docker run --rm --entrypoint rsync \
  739. -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
  740. -v "${dst%/*}":/mnt/dest \
  741. "$docker_image" \
  742. "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
  743. )
  744. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  745. "${cmd[@]}"
  746. }
  747. mailcow:recover-target() {
  748. local backup_host="$1" ident="$2" src="$3" dst="$4"
  749. dst="${dst%/}" ## remove final slash
  750. ssh_options=(-o StrictHostKeyChecking=no)
  751. if [[ "$backup_host" == *":"* ]]; then
  752. port="${backup_host##*:}"
  753. backup_host="${backup_host%%:*}"
  754. ssh_options+=(-p "$port")
  755. else
  756. port=""
  757. backup_host="${backup_host%%:*}"
  758. fi
  759. rsync_opts=(
  760. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  761. -azvArH --delete --delete-excluded
  762. --partial --partial-dir .rsync-partial
  763. --numeric-ids
  764. )
  765. if [ "$DRY_RUN" ]; then
  766. rsync_opts+=("-n")
  767. fi
  768. cmd=(
  769. rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
  770. )
  771. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  772. "${cmd[@]}"
  773. }
  774. nextcloud:src:version() {
  775. local version
  776. if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
  777. err "Can't find version.php file to get the last installed version."
  778. exit 1
  779. fi
  780. version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
  781. if [ -z "$version" ]; then
  782. err "Can't figure out version from version.php content."
  783. exit 1
  784. fi
  785. echo "$version"
  786. }
  787. container:health:check-fix:container-aliveness() {
  788. local container_id="$1"
  789. timeout 5s docker inspect "$container_id" >/dev/null 2>&1
  790. errlvl=$?
  791. if [ "$errlvl" == 124 ]; then
  792. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  793. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  794. pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim |
  795. grep "/$container_id" |
  796. sed -r 's/^ *//g' |
  797. cut -f 1 -d " ")
  798. if [ -z "$pid" ]; then
  799. err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer the 'inspect' command. Can't find its PID either."
  800. return 1
  801. fi
  802. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer the 'inspect' command (pid: $pid)."
  803. Wrap -d "kill pid $pid and restart" <<EOF
  804. kill "$pid"
  805. sleep 2
  806. docker restart "$container_id"
  807. EOF
  808. fi
  809. return $errlvl
  810. }
  811. container:health:check-fix:no-matching-entries() {
  812. local container_id="$1"
  813. out=$(docker exec "$container_id" echo 2>&1)
  814. errlvl=$?
  815. [ "$errlvl" == 0 ] && return 0
  816. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  817. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  818. if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
  819. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
  820. Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
  821. docker restart "$container_id"
  822. sleep 2
  823. docker restart "$container_id"
  824. EOF
  825. return $errlvl
  826. fi
  827. warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
  828. echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
  829. echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
  830. echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
  831. return $errlvl
  832. }
  833. docker:api() {
  834. local endpoint="$1"
  835. curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
  836. }
  837. docker:containers:id() {
  838. docker:api /containers/json | jq -r ".[] | .Id"
  839. }
  840. docker:containers:names() {
  841. docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
  842. }
  843. docker:container:stats() {
  844. container="$1"
  845. docker:api "/containers/$container/stats?stream=false"
  846. }
  847. docker:containers:stats() {
  848. :cache: scope=session
  849. local jobs='' line container id_names sha names name data service project
  850. local DC="com.docker.compose"
  851. local PSF_values=(
  852. ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
  853. )
  854. local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
  855. id_names=$(docker ps -a --format="$PSF") || return 1
  856. ## Create a docker container table from name/sha to service, project, image_name
  857. declare -A resolve
  858. while read-0a line; do
  859. sha=${line%% *}; line=${line#* }
  860. names=${line%% *}; line=${line#* }
  861. names=(${names//,/ })
  862. for name in "${names[@]}"; do
  863. resolve["$name"]="$line"
  864. done
  865. resolve["$sha"]="$line"
  866. done < <(printf "%s\n" "$id_names")
  867. declare -A data
  868. while read-0a line; do
  869. name=${line%% *}; line=${line#* }
  870. ts=${line%% *}; line=${line#* }
  871. resolved="${resolve["$name"]}"
  872. project=${resolved%% *}; resolved=${resolved#* }
  873. service=${resolved%% *}; resolved=${resolved#* }
  874. image_name="$resolved"
  875. if [ -z "$service" ]; then
  876. project="@"
  877. service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
  878. service=${service//\//_}
  879. fi
  880. if [ -n "${data["$project/$service"]}" ]; then
  881. previous=(${data["$project/$service"]})
  882. previous=(${previous[@]:1})
  883. current=($line)
  884. sum=()
  885. i=0; max=${#previous[@]}
  886. while (( i < max )); do
  887. sum+=($((${previous[$i]} + ${current[$i]})))
  888. ((i++))
  889. done
  890. data["$project/$service"]="$ts ${sum[*]}"
  891. else
  892. data["$project/$service"]="$ts $line"
  893. fi
  894. done < <(
  895. for container in "$@"; do
  896. (
  897. docker:container:stats "${container}" |
  898. jq -r '
  899. (.name | ltrimstr("/"))
  900. + " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
  901. + " " + (.memory_stats.usage | tostring)
  902. + " " + (.memory_stats.stats.inactive_file | tostring)
  903. + " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
  904. + " " + (.memory_stats.limit | tostring)
  905. + " " + (.networks.eth0.rx_bytes | tostring)
  906. + " " + (.networks.eth0.rx_packets | tostring)
  907. + " " + (.networks.eth0.rx_errors | tostring)
  908. + " " + (.networks.eth0.rx_dropped | tostring)
  909. + " " + (.networks.eth0.tx_bytes | tostring)
  910. + " " + (.networks.eth0.tx_packets | tostring)
  911. + " " + (.networks.eth0.tx_errors | tostring)
  912. + " " + (.networks.eth0.tx_dropped | tostring)
  913. '
  914. ) &
  915. jobs=1
  916. done
  917. [ -n "$jobs" ] && wait
  918. )
  919. for label in "${!data[@]}"; do
  920. echo "$label ${data[$label]}"
  921. done
  922. }
  923. decorator._mangle_fn docker:containers:stats
  924. export -f docker:containers:stats
  925. col:normalize:size() {
  926. local alignment=$1
  927. awk -v alignment="$alignment" '{
  928. # Store the entire line in the lines array.
  929. lines[NR] = $0;
  930. # Split the line into fields.
  931. split($0, fields);
  932. # Update max for each field.
  933. for (i = 1; i <= length(fields); i++) {
  934. if (length(fields[i]) > max[i]) {
  935. max[i] = length(fields[i]);
  936. }
  937. }
  938. }
  939. END {
  940. # Print lines with fields padded to max.
  941. for (i = 1; i <= NR; i++) {
  942. split(lines[i], fields);
  943. line = "";
  944. for (j = 1; j <= length(fields); j++) {
  945. # Get alignment for the current field.
  946. align = substr(alignment, j, 1);
  947. if (align != "+") {
  948. align = "-"; # Default to left alignment if not "+".
  949. }
  950. line = line sprintf("%" align max[j] "s ", fields[j]);
  951. }
  952. print line;
  953. }
  954. }'
  955. }
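## Illustrative usage: align whitespace-separated columns; a '+' in the
## alignment string right-aligns that column, anything else left-aligns it:
##   printf "%s\n" "name 10" "longername 2" | col:normalize:size "-+"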
  956. rrd:create() {
  957. local prefix="$1"
  958. shift
  959. local label="$1" step="300" src_def
  960. shift
  961. if [ -z "$VAR_DIR" ]; then
  962. err "Unset \$VAR_DIR, can't create rrd graph"
  963. return 1
  964. fi
  965. mkdir -p "$VAR_DIR"
  966. if ! [ -d "$VAR_DIR" ]; then
  967. err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
  968. return 1
  969. fi
  970. if ! type -p rrdtool >/dev/null 2>&1; then
  971. apt-get install rrdtool -y --force-yes </dev/null
  972. if ! type -p rrdtool >/dev/null 2>&1; then
  973. err "Couldn't find nor install 'rrdtool'."
  974. return 1
  975. fi
  976. fi
  977. local RRD_PATH="$VAR_DIR/rrd"
  978. local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
  979. mkdir -p "${RRD_FILE%/*}"
  980. if [ -f "$RRD_FILE" ]; then
  981. err "File '$RRD_FILE' already exists, use a different label."
  982. return 1
  983. fi
  984. local rrd_ds_opts=()
  985. for src_def in "$@"; do
  986. IFS=":" read -r name type min max rra_types <<<"$src_def"
  987. rra_types=${rra_types:-average,max,min}
  988. rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
  989. done
  990. local step=120
  991. local times=( ## with a step of 120s, each datapoint covers 2mn
  992. 2m:1w
  993. 6m:3w
  994. 30m:12w
  995. 3h:1y
  996. 1d:10y
  997. 1w:2080w
  998. )
  999. rrd_rra_opts=()
  1000. for time in "${times[@]}"; do
  1001. rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
  1002. done
  1003. cmd=(
  1004. rrdtool create "$RRD_FILE" \
  1005. --step "$step" \
  1006. "${rrd_ds_opts[@]}" \
  1007. "${rrd_rra_opts[@]}"
  1008. )
  1009. "${cmd[@]}" || {
  1010. err "Failed command: ${cmd[@]}"
  1011. return 1
  1012. }
  1013. }
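## Illustrative usage (hypothetical prefix/label; requires $VAR_DIR to be
## set): create an RRD file with one unbounded GAUGE data source named 'mem':
##   rrd:create "containers" "myc/odoo/mem" "mem:GAUGE:0:U"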
  1014. rrd:update() {
  1015. local prefix="$1"
  1016. shift
  1017. while read-0a data; do
  1018. [ -z "$data" ] && continue
  1019. IFS="~" read -ra data <<<"${data// /\~}"
  1020. label="${data[0]}"
  1021. ts="${data[1]}"
  1022. for arg in "$@"; do
  1023. IFS="|" read -r name arg <<<"$arg"
  1024. rrd_label="${label}/${name}"
  1025. rrd_create_opt=()
  1026. rrd_update_opt="$ts"
  1027. for col_def in ${arg//,/ }; do
  1028. col=${col_def%%:*}; create_def=${col_def#*:}
  1029. rrd_update_opt="${rrd_update_opt}:${data[$col]}"
  1030. rrd_create_opt+=("$create_def")
  1031. done
  1032. local RRD_ROOT_PATH="$VAR_DIR/rrd"
  1033. local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
  1034. local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
  1035. if ! [ -f "$RRD_FILE" ]; then
  1036. info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
  1037. if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
  1038. err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
  1039. return 1
  1040. fi
  1041. fi
  1042. rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
  1043. err "update failed with options: '$rrd_update_opt'"
  1044. return 1
  1045. }
  1046. done
  1047. done
  1048. }
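## Illustrative usage (hypothetical prefix and column mapping): feed
## "label ts v1 v2 ..." lines on stdin and update (creating on first use) one
## RRD per label/name; columns 2 and 4 of the stats output are memory usage
## and usage minus inactive_file:
##   docker:containers:stats $(docker:running_containers) |
##       rrd:update "containers" "mem|2:usage:GAUGE:0:U,4:real:GAUGE:0:U"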
  1049. [ "$SOURCED" ] && return 0
  1050. ##
  1051. ## Command line processing
  1052. ##
  1053. cmdline.spec.gnu
  1054. cmdline.spec.reporting
  1055. cmdline.spec.gnu install
  1056. cmdline.spec::cmd:install:run() {
  1057. :
  1058. }
  1059. cmdline.spec.gnu get-type
  1060. cmdline.spec::cmd:get-type:run() {
  1061. vps:get-type
  1062. }
  1063. cmdline.spec:install:cmd:backup:run() {
  1064. : :posarg: BACKUP_SERVER 'Target backup server'
  1065. : :optfla: --ignore-domain-check \
  1066. "Allow bypassing the domain check in the
  1067. compose file (only used in compose
  1068. installations)."
  1069. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1070. local vps_type
  1071. vps_type=$(vps:get-type) || {
  1072. err "Failed to get type of installation."
  1073. return 1
  1074. }
  1075. if ! fn.exists "${vps_type}:install-backup"; then
  1076. err "type '${vps_type}' has no backup installation implemented yet."
  1077. return 1
  1078. fi
  1079. opts=()
  1080. [ "$opt_ignore_ping_check" ] &&
  1081. opts+=("--ignore-ping-check")
  1082. if [ "$vps_type" == "compose" ]; then
  1083. [ "$opt_ignore_domain_check" ] &&
  1084. opts+=("--ignore-domain-check")
  1085. fi
  1086. "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
  1087. }
  1088. DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
  1089. cmdline.spec.gnu compose-backup
  1090. cmdline.spec:install:cmd:compose-backup:run() {
  1091. : :posarg: BACKUP_SERVER 'Target backup server'
  1092. : :optval: --service-name,-s "YAML service name in compose
  1093. file to check for existence of key.
  1094. Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
  1095. : :optval: --compose-file,-f "Compose file location. Defaults to
  1096. the value of '\$DEFAULT_COMPOSE_FILE'"
  1097. : :optfla: --ignore-domain-check \
  1098. "Allow bypassing the domain check in the
  1099. compose file."
  1100. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1101. local service_name compose_file
  1102. [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
  1103. compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
  1104. service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
  1105. if ! [ -e "$compose_file" ]; then
  1106. err "Compose file not found in '$compose_file'."
  1107. return 1
  1108. fi
  1109. compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
  1110. "$opt_ignore_ping_check" "$opt_ignore_domain_check"
  1111. }
  1112. cmdline.spec:install:cmd:mailcow-backup:run() {
  1113. : :posarg: BACKUP_SERVER 'Target backup server'
  1114. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1115. "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
  1116. }
  1117. cmdline.spec.gnu backup
  1118. cmdline.spec::cmd:backup:run() {
  1119. local vps_type
  1120. vps_type=$(vps:get-type) || {
  1121. err "Failed to get type of installation."
  1122. return 1
  1123. }
  1124. if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
  1125. err "type '${vps_type}' has no backup process implemented yet."
  1126. return 1
  1127. fi
  1128. "cmdline.spec:backup:cmd:${vps_type}:run"
  1129. }
  1130. cmdline.spec:backup:cmd:mailcow:run() {
  1131. local cmd_line cron_line cmd
  1132. for f in mysql-backup mirror-dir; do
  1133. [ -e "/etc/cron.d/$f" ] || {
  1134. err "Can't find '/etc/cron.d/$f'."
  1135. echo " Have you forgotten to run 'vps install backup BACKUP_HOST' ?" >&2
  1136. return 1
  1137. }
  1138. if ! cron_line=$(cat "/etc/cron.d/$f" |
  1139. grep -v "^#" | grep "\* \* \*"); then
  1140. err "Can't find cron_line in '/etc/cron.d/$f'." \
  1141. "Have you modified it ?"
  1142. return 1
  1143. fi
  1144. cron_line=${cron_line%|*}
  1145. cmd_line=(${cron_line#*root})
  1146. if [ "$f" == "mirror-dir" ]; then
  1147. cmd=()
  1148. for arg in "${cmd_line[@]}"; do
  1149. [ "$arg" != "-q" ] && cmd+=("$arg")
  1150. done
  1151. else
  1152. cmd=("${cmd_line[@]}")
  1153. fi
  1154. code="${cmd[*]}"
  1155. echo "${WHITE}Launching:${NORMAL} ${code}"
  1156. {
  1157. {
  1158. (
  1159. ## Some commands use colors that are already
  1160. ## set by this program and would trickle
  1161. ## down unwantedly
  1162. ansi_color no
  1163. eval "${code}"
  1164. ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1165. set_errlvl "${PIPESTATUS[0]}"
  1166. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1167. set_errlvl "${PIPESTATUS[0]}"
  1168. } 3>&1 1>&2 2>&3
  1169. if [ "$?" != "0" ]; then
  1170. err "Failed."
  1171. return 1
  1172. fi
  1173. done
  1174. info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1175. }
  1176. set_errlvl() { return "${1:-1}"; }
  1177. cmdline.spec:backup:cmd:compose:run() {
  1178. local cron_line args
  1179. project_name=$(compose:project_name) || return 1
  1180. docker_cmd=$(compose:get_cron_docker_cmd) || return 1
  1181. echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
  1182. {
  1183. {
  1184. eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1185. set_errlvl "${PIPESTATUS[0]}"
  1186. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1187. set_errlvl "${PIPESTATUS[0]}"
  1188. } 3>&1 1>&2 2>&3
  1189. if [ "$?" != "0" ]; then
  1190. err "Failed."
  1191. return 1
  1192. fi
  1193. info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1194. }
  1195. cmdline.spec.gnu recover-target
  1196. cmdline.spec::cmd:recover-target:run() {
  1197. : :posarg: BACKUP_DIR 'Source directory on backup side'
  1198. : :posarg: HOST_DIR 'Target directory on host side'
  1199. : :optval: --backup-host,-B "The backup host"
  1200. : :optfla: --dry-run,-n "Don't do anything, instead tell what it
  1201. would do."
  1202. ## if no backup host take the one by default
  1203. backup_host="$opt_backup_host"
  1204. if [ -z "$backup_host" ]; then
  1205. backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
  1206. read -r backup_host ident <<<"$backup_host_ident"
  1207. fi
  1208. if [[ "$BACKUP_DIR" == /* ]]; then
  1209. err "BACKUP_DIR must be a relative path from the root of your backup."
  1210. return 1
  1211. fi
  1212. REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
  1213. err "Can't find HOST_DIR '$HOST_DIR'."
  1214. return 1
  1215. }
  1216. export DRY_RUN="${opt_dry_run}"
  1217. backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
  1218. }
  1219. cmdline.spec.gnu odoo
  1220. cmdline.spec::cmd:odoo:run() {
  1221. :
  1222. }
  1223. cmdline.spec.gnu restart
  1224. cmdline.spec:odoo:cmd:restart:run() {
  1225. : :optval: --service,-s "The service (defaults to 'odoo')"
  1226. local out odoo_service
  1227. odoo_service="${opt_service:-odoo}"
  1228. project_name=$(compose:project_name) || return 1
  1229. if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
  1230. if [[ "$out" == *"no matching entries in passwd file"* ]]; then
  1231. warn "Caught docker bug. Restarting once more."
  1232. if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
  1233. err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
  1234. echo " output:" >&2
  1235. echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
  1236. exit 1
  1237. fi
  1238. else
  1239. err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
  1240. exit 1
  1241. fi
  1242. fi
  1243. info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
  1244. }
  1245. cmdline.spec.gnu restore
  1246. cmdline.spec:odoo:cmd:restore:run() {
  1247. : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
  1248. (can be a local file or a URL)'
  1249. : :optval: --service,-s "The service (defaults to 'odoo')"
  1250. : :optval: --database,-d 'Target database (default if not specified)'
  1251. local out
  1252. odoo_service="${opt_service:-odoo}"
  1253. if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
  1254. [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
  1255. settmpdir ZIP_TMP_LOCATION
  1256. tmp_location="$ZIP_TMP_LOCATION/dump.zip"
  1257. curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
  1258. err "Couldn't get '$ZIP_DUMP_LOCATION'."
  1259. exit 1
  1260. }
  1261. if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
  1262. err "Download doesn't seem to be a zip file."
  1263. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1264. exit 1
  1265. fi
  1266. info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
  1267. echo " in '$tmp_location'." >&2
  1268. ZIP_DUMP_LOCATION="$tmp_location"
  1269. fi
  1270. [ -e "$ZIP_DUMP_LOCATION" ] || {
  1271. err "No file '$ZIP_DUMP_LOCATION' found." >&2
  1272. exit 1
  1273. }
  1274. #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1275. msg_dbname=default
  1276. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1277. compose --no-hooks drop "$odoo_service" $opt_database || {
  1278. err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}."
  1279. exit 1
  1280. }
  1281. compose --no-hooks load "$odoo_service" $opt_database < "$ZIP_DUMP_LOCATION" || {
  1282. err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
  1283. exit 1
  1284. }
  1285. info "Successfully restored ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
  1286. ## Restart odoo to ensure there are no bugs lingering on it.
  1287. cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1288. }
  1289. cmdline.spec.gnu dump
  1290. cmdline.spec:odoo:cmd:dump:run() {
  1291. : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
  1292. : :optval: --database,-d 'Target database (default if not specified)'
  1293. : :optval: --service,-s "The service (defaults to 'odoo')"
  1294. odoo_service="${opt_service:-odoo}"
  1295. msg_dbname=default
  1296. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1297. compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
  1298. err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1299. exit 1
  1300. }
  1301. info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1302. }
  1303. cmdline.spec.gnu drop
  1304. cmdline.spec:odoo:cmd:drop:run() {
  1305. : :optval: --database,-d 'Target database (default if not specified)'
  1306. : :optval: --service,-s "The service (defaults to 'odoo')"
  1307. odoo_service="${opt_service:-odoo}"
  1308. msg_dbname=default
  1309. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1310. compose --no-hooks drop "$odoo_service" $opt_database || {
  1311. err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1312. exit 1
  1313. }
  1314. info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1315. }
  1316. cmdline.spec.gnu set-cyclos-url
  1317. cmdline.spec:odoo:cmd:set-cyclos-url:run() {
  1318. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1319. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1320. local URL
  1321. dbname=${opt_database:-odoo}
  1322. cyclos_service="${opt_service:-cyclos}"
  1323. project_name=$(compose:project_name) || exit 1
  1324. URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1325. Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
  1326. echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
  1327. compose:psql "$project_name" "$dbname" || {
  1328. err "Failed to set cyclos url value in '$dbname' database."
  1329. exit 1
  1330. }
  1331. EOF
  1332. }
  1333. cmdline.spec.gnu fix-sso
  1334. cmdline.spec:odoo:cmd:fix-sso:run() {
  1335. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1336. local public_user_id project_name dbname
  1337. dbname=${opt_database:-odoo}
  1338. project_name=$(compose:project_name) || exit 1
  1339. public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
  1340. Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
  1341. echo "UPDATE website SET user_id = $public_user_id;" |
  1342. compose:psql "$project_name" "$dbname" || {
  1343. err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
  1344. exit 1
  1345. }
  1346. EOF
  1347. }
  1348. cmdline.spec.gnu cyclos
  1349. cmdline.spec::cmd:cyclos:run() {
  1350. :
  1351. }
  1352. cmdline.spec:cyclos:cmd:dump:run() {
  1353. : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
  1354. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1355. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1356. cyclos_service="${opt_service:-cyclos}"
  1357. cyclos_database="${opt_database:-cyclos}"
  1358. project_name=$(compose:project_name) || exit 1
  1359. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1360. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1361. docker stop "$container_id" || exit 1
  1362. Wrap -d "Dump postgres database '${cyclos_database}'." -- \
  1363. postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
  1364. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1365. docker start "${container_id}" || exit 1
  1366. }
  1367. cmdline.spec.gnu restore
  1368. cmdline.spec:cyclos:cmd:restore:run() {
  1369. : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
  1370. (can be a local file or a URL)'
  1371. : :optval: --service,-s "The service (defaults to 'cyclos')"
  1372. : :optval: --database,-d 'Target database (default if not specified)'
  1373. local out
  1374. cyclos_service="${opt_service:-cyclos}"
  1375. cyclos_database="${opt_database:-cyclos}"
  1376. project_name=$(compose:project_name) || exit 1
  1377. url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
  1378. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1379. if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
  1380. [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
  1381. settmpdir GZ_TMP_LOCATION
  1382. tmp_location="$GZ_TMP_LOCATION/dump.gz"
  1383. Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
  1384. ## Note that curl versions before 7.76.0 do not have
  1385. curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
  1386. > "$tmp_location" || {
  1387. echo "Error fetching resource. Is the URL correct?" >&2
  1388. exit 1
  1389. }
  1390. if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
  1391. hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
  1392. err "Download doesn't seem to be a gzip file."
  1393. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1394. exit 1
  1395. fi
  1396. EOF
  1397. GZ_DUMP_LOCATION="$tmp_location"
  1398. fi
  1399. [ -e "$GZ_DUMP_LOCATION" ] || {
  1400. err "No file '$GZ_DUMP_LOCATION' found." >&2
  1401. exit 1
  1402. }
  1403. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1404. docker stop "$container_id" || exit 1
  1405. ## XXXvlab: making the assumption that the postgres username should
  1406. ## be the same as the cyclos service selected (which is the default,
  1407. ## but not always the case).
  1408. Wrap -d "restore postgres database '${cyclos_database}'." -- \
  1409. postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
  1410. ## ensure that the database is not locked
  1411. Wrap -d "check and remove database lock if any" -- \
  1412. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1413. Wrap -d "set root url to '$url'" -- \
  1414. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1415. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1416. docker start "${container_id}" || exit 1
  1417. }
  1418. cmdline.spec.gnu set-root-url
  1419. cmdline.spec:cyclos:cmd:set-root-url:run() {
  1420. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1421. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1422. local URL
  1423. cyclos_database=${opt_database:-cyclos}
  1424. cyclos_service="${opt_service:-cyclos}"
  1425. project_name=$(compose:project_name) || exit 1
  1426. url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1427. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1428. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1429. docker stop "$container_id" || exit 1
  1430. Wrap -d "set root url to '$url'" -- \
  1431. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1432. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1433. docker start "${container_id}" || exit 1
  1434. }
  1435. cmdline.spec.gnu unlock
  1436. cmdline.spec:cyclos:cmd:unlock:run() {
  1437. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1438. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1439. local URL
  1440. cyclos_database=${opt_database:-cyclos}
  1441. cyclos_service="${opt_service:-cyclos}"
  1442. project_name=$(compose:project_name) || exit 1
  1443. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1444. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1445. docker stop "$container_id" || exit 1
  1446. Wrap -d "check and remove database lock if any" -- \
  1447. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1448. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1449. docker start "${container_id}" || exit 1
  1450. }
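## Usage sketch (assuming this script is installed as the 'vps' command;
## substitute the actual entry-point name of your installation):
##
##     vps cyclos unlock --database cyclos --service cyclos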
  1451. cmdline.spec.gnu rocketchat
  1452. cmdline.spec::cmd:rocketchat:run() {
  1453. :
  1454. }
  1455. cmdline.spec.gnu drop-indexes
  1456. cmdline.spec:rocketchat:cmd:drop-indexes:run() {
  1457. : :optval: --database,-d "Target database ('rocketchat' if not specified)"
  1458. : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"
  1459. local URL
  1460. rocketchat_database=${opt_database:-rocketchat}
  1461. rocketchat_service="${opt_service:-rocketchat}"
  1462. project_name=$(compose:project_name) || exit 1
  1463. container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1
  1464. Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1465. docker stop "$container_id" || exit 1
  1466. errlvl=0
  1467. Wrap -d "drop indexes" -- \
  1468. rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
  1469. errlvl=1
  1470. errmsg="Failed to drop indexes"
  1471. }
  1472. Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1473. docker start "${container_id}" || exit 1
  1474. if [ "$errlvl" != 0 ]; then
  1475. err "$errmsg"
  1476. fi
  1477. exit "$errlvl"
  1478. }
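## Usage sketch (same assumption about the 'vps' entry point as above):
##
##     vps rocketchat drop-indexes --database rocketchat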
  1479. cmdline.spec.gnu nextcloud
  1480. cmdline.spec::cmd:nextcloud:run() {
  1481. :
  1482. }
  1483. cmdline.spec.gnu upgrade
  1484. cmdline.spec:nextcloud:cmd:upgrade:run() {
1485. : :posarg: [TARGET_VERSION] "Target Nextcloud version to upgrade to"
1486. : :optval: --service,-s "The nextcloud service name (defaults to 'nextcloud')"
  1487. local URL
  1488. nextcloud_service="${opt_service:-nextcloud}"
  1489. project_name=$(compose:project_name) || exit 1
  1490. containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1
  1491. container_stopped=()
  1492. if [ -n "$containers" ]; then
  1493. for container in $containers; do
  1494. Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
  1495. docker stop "$container" || {
  1496. err "Failed to stop container '$container'."
  1497. exit 1
  1498. }
  1499. container_stopped+=("$container")
  1500. done
  1501. fi
  1502. before_version=$(nextcloud:src:version) || exit 1
  1503. ## -q to remove the display of ``compose`` related information
  1504. ## like relation resolution.
  1505. ## --no-hint to remove the final hint about modifying your
  1506. ## ``compose.yml``.
  1507. compose -q upgrade "$nextcloud_service" --no-hint
  1508. errlvl="$?"
  1509. after_version=$(nextcloud:src:version)
  1510. if [ "$after_version" != "$before_version" ]; then
  1511. desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
  1512. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1513. Wrap -d "$desc" -- \
  1514. compose:file:value-change \
  1515. "${nextcloud_service}.docker-compose.image" \
  1516. "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
  1517. fi
  1518. if [ "$errlvl" == 0 ]; then
  1519. echo "${WHITE}Launching final compose${NORMAL}"
  1520. compose up || exit 1
  1521. fi
  1522. exit "$errlvl"
  1523. }
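## Usage sketch ('vps' entry point assumed, service name optional):
##
##     vps nextcloud upgrade
##     vps nextcloud upgrade --service nextcloud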
  1524. cmdline.spec.gnu check-fix
  1525. cmdline.spec::cmd:check-fix:run() {
  1526. : :posarg: [SERVICES...] "Optional service to check"
  1527. : :optval: --check,-c "Specify a check or a list of checks separated by commas"
1528. : :optfla: --silent,-s "Don't output anything if everything goes well"
  1529. local project_name service_name containers container check
  1530. all_checks=$(declare -F |
  1531. egrep '^declare -fx? container:health:check-fix:[^ ]+$' |
  1532. cut -f 4 -d ":")
  1533. checks=(${opt_check//,/ })
  1534. for check in "${checks[@]}"; do
  1535. fn.exists container:health:check-fix:$check || {
  1536. err "check '$check' not found."
  1537. return 1
  1538. }
  1539. done
  1540. if [ "${#checks[*]}" == 0 ]; then
  1541. checks=($all_checks)
  1542. fi
  1543. ## XXXvlab: could make it parallel
  1544. project_name=$(compose:project_name) || exit 1
  1545. containers=($(compose:project:containers "${project_name}")) || exit 1
  1546. found=
  1547. for container in "${containers[@]}"; do
  1548. service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
  1549. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1550. [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
  1551. fi
  1552. found=1
  1553. one_bad=
  1554. for check in "${checks[@]}"; do
  1555. if ! container:health:check-fix:"$check" "$container"; then
  1556. one_bad=1
  1557. fi
  1558. done
  1559. if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
  1560. Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
  1561. Feedback
  1562. fi
  1563. done
  1564. if [ -z "$found" ]; then
  1565. if [ -z "$opt_silent" ]; then
  1566. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1567. warn "No container for given services found in current project '$project_name'."
  1568. else
  1569. warn "No container found for current project '$project_name'."
  1570. fi
  1571. fi
  1572. return 1
  1573. fi
  1574. }
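## Checks run by 'check-fix' above are discovered by introspection: any
## function named 'container:health:check-fix:<check-name>' is picked up
## automatically and called with a container id. A new check could thus be
## added as follows (hypothetical example, not part of the existing set):
##
##   container:health:check-fix:my-check() {
##       local container_id="$1"
##       ## return 0 when healthy (or after a successful fix), non-zero otherwise
##       docker inspect "$container_id" >/dev/null 2>&1
##   }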
  1575. awk:require() {
  1576. local require_at_least="$1" version already_installed
  1577. while true; do
  1578. if ! version=$(awk --version 2>/dev/null); then
  1579. version=""
  1580. else
  1581. version=${version%%,*}
  1582. version=${version##* }
  1583. fi
  1584. if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
  1585. if [ -z "$already_installed" ]; then
  1586. if [ -z "$version" ]; then
  1587. info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
  1588. else
  1589. info "Found gawk version '$version'. Updating 'gawk'..."
  1590. fi
  1591. apt-get install gawk -y </dev/null || {
  1592. err "Failed to install 'gawk'."
  1593. return 1
  1594. }
  1595. already_installed=true
  1596. else
  1597. if [ -z "$version" ]; then
  1598. err "No 'gawk' available even after having installed one"
  1599. else
  1600. err "'gawk' version '$version' is lower than required" \
  1601. "'$require_at_least' even after updating 'gawk'."
  1602. fi
  1603. return 1
  1604. fi
  1605. continue
  1606. fi
  1607. return 0
  1608. done
  1609. }
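## awk:require (above) makes sure GNU awk at or above the requested version
## is available, installing or upgrading 'gawk' through apt-get at most once.
## Typical guard, as used further below:
##
##     awk:require 4.1.4 || return 1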
  1610. cmdline.spec.gnu stats
  1611. cmdline.spec::cmd:stats:run() {
  1612. : :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
  1613. : :optfla: --silent,-s "Shorthand for '--format silent'"
1614. : :optval: --resource,-r 'resource(s), comma-separated'
  1615. local project_name service_name containers container check
  1616. if [[ -n "${opt_silent}" ]]; then
  1617. if [[ -n "${opt_format}" ]]; then
  1618. err "'--silent' conflict with option '--format'."
  1619. return 1
  1620. fi
  1621. opt_format=s
  1622. fi
  1623. opt_format="${opt_format:-pretty}"
  1624. case "${opt_format}" in
  1625. raw|r)
  1626. opt_format="raw"
  1627. :
  1628. ;;
  1629. silent|s)
  1630. opt_format="silent"
  1631. ;;
  1632. pretty|p)
  1633. opt_format="pretty"
  1634. awk:require 4.1.4 || return 1
  1635. ;;
  1636. *)
  1637. err "Invalid value '$opt_format' for option --format"
  1638. echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
  1639. return 1
  1640. esac
  1641. local resources=(c.{memory,network} load_avg)
  1642. if [ -n "${opt_resource}" ]; then
  1643. resources=(${opt_resource//,/ })
  1644. fi
  1645. local not_found=()
  1646. for resource in "${resources[@]}"; do
  1647. if ! fn.exists "stats:$resource"; then
  1648. not_found+=("$resource")
  1649. fi
  1650. done
  1651. if [[ "${#not_found[@]}" -gt 0 ]]; then
  1652. not_found_msg=$(printf "%s, " "${not_found[@]}")
  1653. not_found_msg=${not_found_msg%, }
  1654. err "Unsupported resource(s) provided: ${not_found_msg}"
  1655. echo " resource must be one-of:" >&2
  1656. declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
  1657. return 1
  1658. fi
  1659. :state-dir:
  1660. for resource in "${resources[@]}"; do
  1661. [ "$opt_format" == "pretty" ] && echo "${WHITE}$resource${NORMAL}:"
  1662. stats:"$resource" "$opt_format" 2>&1 | prefix " "
  1663. set_errlvl "${PIPESTATUS[0]}" || return 1
  1664. done
  1665. }
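## The 'stats:*' helpers below feed samples to RRD through rrd:update. Judging
## from the calls, the second argument follows the pattern
## '<rrd-name>|<field-index>:<ds-name>:<DST>:<min>:<max>[,...]', where
## <field-index> is the 1-based column of the input line, <DST> an RRD
## data-source type (GAUGE, COUNTER, ...) and 'U' leaves min/max unbounded.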
  1666. stats:c.memory() {
  1667. local format="$1"
  1668. local out
  1669. container_to_check=($(docker:running_containers)) || exit 1
  1670. out=$(docker:containers:stats "${container_to_check[@]}")
  1671. printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
  1672. return 1
  1673. }
  1674. case "${format:-p}" in
  1675. raw|r)
  1676. printf "%s\n" "$out" | cut -f 1-5 -d " "
  1677. ;;
  1678. pretty|p)
  1679. awk:require 4.1.4 || return 1
  1680. {
  1681. echo "container" "__total____" "buffered____" "resident____"
  1682. printf "%s\n" "$out" |
  1683. awk '
  1684. {
  1685. offset = strftime("%z", $2);
  1686. print $1, substr($0, index($0,$3));
  1687. }' | cut -f 1-4 -d " " |
  1688. numfmt --field 2-4 --to=iec-i --format=%8.1fB |
  1689. sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
  1690. sort
  1691. } | col:normalize:size -+++ |
  1692. sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
  1693. header:make
  1694. ;;
  1695. esac
  1696. }
  1697. stats:c.network() {
  1698. local format="$1"
  1699. local out
  1700. container_to_check=($(docker:running_containers)) || exit 1
  1701. out=$(docker:containers:stats "${container_to_check[@]}")
  1702. cols=(
  1703. {rx,tx}_{bytes,packets,errors,dropped}
  1704. )
  1705. idx=5 ## starting column idx for next fields
  1706. defs=()
  1707. for col in "${cols[@]}"; do
  1708. defs+=("$((idx++)):${col}:COUNTER:U:U")
  1709. done
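## Join the per-column definitions with commas: temporarily setting IFS to ','
## makes "${defs[*]}" expand comma-separated; the original IFS is saved and
## restored around the assignment.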
  1710. OLDIFS="$IFS"
  1711. IFS="," defs="${defs[*]}"
  1712. IFS="$OLDIFS"
  1713. printf "%s\n" "$out" |
  1714. rrd:update "containers" \
  1715. "network|${defs}" || {
  1716. return 1
  1717. }
  1718. case "${format:-p}" in
  1719. raw|r)
  1720. printf "%s\n" "$out" | cut -f 1,2,7- -d " "
  1721. ;;
  1722. pretty|p)
  1723. awk:require 4.1.4 || return 1
  1724. {
  1725. echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
  1726. echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
  1727. printf "%s\n" "$out" |
  1728. awk '
  1729. {
  1730. offset = strftime("%z", $2);
  1731. print $1, substr($0, index($0,$7));
  1732. }' |
  1733. numfmt --field 2,6 --to=iec-i --format=%8.1fB |
  1734. numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
  1735. sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
  1736. sort
  1737. } | col:normalize:size -++++++++ |
  1738. sed -r '
  1739. s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
  1740. s/(\.[0-9]):([KMGTPE])/\1 \2/g;
  1741. s/ ([0-9]+)\.0:B/\1 /g;
  1742. s/ ([0-9]+)\.0:/\1 /g;
  1743. ' |
  1744. header:make 2
  1745. ;;
  1746. esac
  1747. }
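## header:make reads the first N lines (default 1) as column headers, turns
## the '_' padding placeholders back into spaces, prints them dimmed in gray,
## then passes the remaining lines through unchanged.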
  1748. header:make() {
  1749. local nb_line="${1:-1}"
  1750. local line
  1751. while ((nb_line-- > 0)); do
  1752. read-0a line
  1753. echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
  1754. done
  1755. cat
  1756. }
  1757. stats:load_avg() {
  1758. local format="$1"
  1759. local out
  1760. out=$(host:sys:load_avg)
  1761. printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
  1762. return 1
  1763. }
  1764. case "${format:-p}" in
  1765. raw|r)
  1766. printf "%s\n" "$out" | cut -f 2-5 -d " "
  1767. ;;
  1768. pretty|p)
  1769. {
  1770. echo "___1m" "___5m" "__15m"
  1771. printf "%s\n" "$out" | cut -f 3-5 -d " "
  1772. } | col:normalize:size +++ | header:make
  1773. ;;
  1774. esac
  1775. }
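## host:sys:load_avg (below) emits a single line: an empty leading field
## (apparently a placeholder identifier), the current epoch timestamp, then
## the 1, 5 and 15 minute load averages parsed from 'uptime' -- which is why
## stats:load_avg above selects fields 2-5 (raw) or 3-5 (pretty).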
  1776. host:sys:load_avg() {
  1777. local uptime
  1778. uptime="$(uptime)"
  1779. uptime=${uptime##*: }
  1780. uptime=${uptime//,/}
  1781. printf "%s " "" "$(date +%s)" "$uptime"
  1782. }
  1783. cmdline::parse "$@"