  1. #!/bin/bash
  2. . /etc/shlib
  3. include common
  4. include parse
  5. include cmdline
  6. include config
  7. include cache
  8. include fn
  9. include docker
  10. [[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
  11. version=0.1
  12. desc='Install backup'
  13. help=""
  14. version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
  15. docker:running-container-projects() {
  16. :cache: scope=session
  17. docker ps --format '{{.Label "com.docker.compose.project"}}' | sort | uniq
  18. }
  19. decorator._mangle_fn docker:running-container-projects
  20. ssh:mk-private-key() {
  21. local host="$1" service_name="$2"
  22. (
  23. settmpdir VPS_TMPDIR
  24. ssh-keygen -t rsa -N "" -f "$VPS_TMPDIR/rsync_rsa" -C "$service_name@$host" >/dev/null
  25. cat "$VPS_TMPDIR/rsync_rsa"
  26. )
  27. }
  28. mailcow:has-images-running() {
  29. local images
  30. images=$(docker ps --format '{{.Image}}' | sort | uniq)
  31. [[ $'\n'"$images" == *$'\n'"mailcow/"* ]]
  32. }
  33. mailcow:has-container-project-mentionning-mailcow() {
  34. local projects
  35. projects=$(docker:running-container-projects) || return 1
  36. [[ $'\n'"$projects"$'\n' == *mailcow* ]]
  37. }
  38. mailcow:has-running-containers() {
  39. mailcow:has-images-running ||
  40. mailcow:has-container-project-mentionning-mailcow
  41. }
  42. mailcow:get-root() {
  43. :cache: scope=session
  44. local dir
  45. for dir in {/opt{,/apps},/root}/mailcow-dockerized; do
  46. [ -d "$dir" ] || continue
  47. [ -r "$dir/mailcow.conf" ] || continue
  48. echo "$dir"
  49. return 0
  50. done
  51. return 1
  52. }
  53. decorator._mangle_fn mailcow:get-root
  54. compose:get-compose-yml() {
  55. :cache: scope=session
  56. local path
  57. [ -e "/etc/compose/local.conf" ] && . "/etc/compose/local.conf"
  58. path=${DEFAULT_COMPOSE_FILE:-/etc/compose/compose.yml}
  59. [ -e "$path" ] || return 1
  60. echo "$path"
  61. }
  62. decorator._mangle_fn compose:get-compose-yml
  63. export -f compose:get-compose-yml
  64. compose:has-container-project-myc() {
  65. local projects
  66. projects=$(docker:running-container-projects) || return 1
  67. [[ $'\n'"$projects"$'\n' == *$'\n'"myc"$'\n'* ]]
  68. }
  69. compose:file:value-change() {
  70. local key="$1" value="$2"
  71. yaml:file:value-change "$(compose:get-compose-yml)" "$key" "$value" || exit 1
  72. }
  73. export -f compose:file:value-change
  74. yaml:file:value-change() {
  75. local file="$1" key="$2" value="$3" first=1 count=0 diff=""
  76. (
  77. cd "${file%/*}"
  78. while read-0 hunk; do
  79. if [ -n "$first" ]; then
  80. diff+="$hunk"
  81. first=
  82. continue
  83. fi
  84. if [[ "$hunk" =~ $'\n'"+"[[:space:]]+"${key##*.}:" ]]; then
  85. ((count++))
86. diff+="$hunk"
  87. else
  88. :
  89. # echo "discarding:" >&2
  90. # e "$hunk" | prefix " | " >&2
  91. fi
  92. done < <(
  93. export DEBUG=
  94. settmpdir YQ_TEMP
  95. cp "${file}" "$YQ_TEMP/compose.yml" &&
  96. yq -i ".${key} = \"${value}\"" "$YQ_TEMP/compose.yml" &&
  97. sed -ri 's/^([^# ])/\n\0/g' "$YQ_TEMP/compose.yml" &&
  98. diff -u0 -Z "${file}" "$YQ_TEMP/compose.yml" |
  99. sed -r "s/^(@@.*)$/\x00\1/g;s%^(\+\+\+) [^\t]+%\1 ${file}%g"
  100. printf "\0"
  101. )
  102. if [[ "$count" == 0 ]]; then
  103. err "No change made to '$file'."
  104. return 1
  105. fi
  106. if [[ "$count" != 1 ]]; then
  107. err "compose file change request seems dubious and was refused:"
  108. e "$diff" | prefix " | " >&2
  109. return 1
  110. fi
  111. echo Applying: >&2
  112. e "$diff" | prefix " | " >&2
  113. patch <<<"$diff"
  114. ) || exit 1
  115. }
  116. export -f yaml:file:value-change
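## Usage sketch (illustrative key/value, not taken from this file): the
## helper rewrites one key of the default compose.yml, but only applies
## the change when the yq-generated diff contains exactly one hunk
## touching that key:
##
##     yaml:file:value-change /etc/compose/compose.yml \
##         "myservice.docker-compose.image" "docker.0k.io/myimage:1.2.3-myc"
##
## Any zero-hunk or multi-hunk diff is refused and reported instead of
## being patched in.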
  117. type:is-mailcow() {
  118. mailcow:get-root >/dev/null ||
  119. mailcow:has-running-containers
  120. }
  121. type:is-compose() {
  122. compose:get-compose-yml >/dev/null &&
  123. compose:has-container-project-myc
  124. }
  125. vps:get-type() {
  126. :cache: scope=session
  127. local fn
  128. for fn in $(declare -F | cut -f 3 -d " " | egrep "^type:is-"); do
  129. "$fn" && {
  130. echo "${fn#type:is-}"
  131. return 0
  132. }
  133. done
  134. return 1
  135. }
  136. decorator._mangle_fn vps:get-type
  137. mirror-dir:sources() {
  138. :cache: scope=session
  139. if ! shyaml get-values default.sources < /etc/mirror-dir/config.yml; then
  140. err "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'."
  141. return 1
  142. fi
  143. }
  144. decorator._mangle_fn mirror-dir:sources
  145. mirror-dir:check-add() {
  146. local elt="$1" sources
  147. sources=$(mirror-dir:sources) || return 1
  148. if [[ $'\n'"$sources"$'\n' == *$'\n'"$elt"$'\n'* ]]; then
  149. info "Volume $elt already in sources"
  150. else
  151. Elt "Adding directory $elt"
  152. sed -i "/sources:/a\ - \"${elt}\"" \
  153. /etc/mirror-dir/config.yml
  154. Feedback || return 1
  155. fi
  156. }
  157. mirror-dir:check-add-vol() {
  158. local elt="$1"
  159. mirror-dir:check-add "/var/lib/docker/volumes/*_${elt}-*/_data"
  160. }
  161. ## The first colon is to prevent auto-export of function from shlib
  162. : ; bash-bug-5() { { cat; } < <(e) >/dev/null; ! cat "$1"; } && bash-bug-5 <(e) 2>/dev/null &&
163. export BASH_BUG_5=1 && unset -f bash-bug-5
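## (Hedged note: the probe above appears to detect bash builds where
## reading a process substitution passed as a positional argument fails
## after another process substitution was consumed in the same function;
## when that happens, BASH_BUG_5 is exported so that wrap() below falls
## back to writing the code to a temporary file instead of running
## "$interpreter" <(e "$code") directly.)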
  164. wrap() {
  165. local label="$1" code="$2"
  166. shift 2
  167. export VERBOSE=1
  168. interpreter=/bin/bash
  169. if [ -n "$BASH_BUG_5" ]; then
  170. (
  171. settmpdir tmpdir
  172. fname=${label##*/}
  173. e "$code" > "$tmpdir/$fname" &&
  174. chmod +x "$tmpdir/$fname" &&
  175. Wrap -vsd "$label" -- "$interpreter" "$tmpdir/$fname" "$@"
  176. )
  177. else
  178. Wrap -vsd "$label" -- "$interpreter" <(e "$code") "$@"
  179. fi
  180. }
  181. ping_check() {
  182. #global ignore_ping_check
  183. local host="$1"
  184. ip=$(getent ahosts "$host" | egrep "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+" |
  185. head -n 1 | cut -f 1 -d " ") || return 1
  186. my_ip=$(curl -s myip.kal.fr)
  187. if [ "$ip" != "$my_ip" ]; then
  188. if [ -n "$ignore_ping_check" ]; then
  189. warn "IP of '$host' ($ip) doesn't match mine ($my_ip). Ignoring due to ``--ignore-ping-check`` option."
  190. else
  191. err "IP of '$host' ($ip) doesn't match mine ($my_ip). Use ``--ignore-ping-check`` to ignore check."
  192. return 1
  193. fi
  194. fi
  195. }
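## Example use (hostname is illustrative): make sure the domain we are
## about to configure really resolves to this machine before installing
## backups:
##
##     ping_check "mail.example.com" || return 1
##
## The check compares the first IPv4 address resolved for the host with
## the public IP reported by myip.kal.fr.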
  196. mailcow:install-backup() {
  197. local BACKUP_SERVER="$1" ignore_ping_check="$2" mailcow_root DOMAIN
  198. ## find installation
  199. mailcow_root=$(mailcow:get-root) || {
  200. err "Couldn't find a valid mailcow root directory."
  201. return 1
  202. }
  203. ## check ok
  204. DOMAIN=$(cat "$mailcow_root/.env" | grep ^MAILCOW_HOSTNAME= | cut -f 2 -d =) || {
  205. err "Couldn't find MAILCOW_HOSTNAME in file \"$mailcow_root/.env\"."
  206. return 1
  207. }
  208. ping_check "$DOMAIN" || return 1
  209. MYSQL_ROOT_PASSWORD=$(cat "$mailcow_root/.env" | grep ^DBROOT= | cut -f 2 -d =) || {
  210. err "Couldn't find DBROOT in file \"$mailcow_root/.env\"."
  211. return 1
  212. }
  213. if docker compose >/dev/null 2>&1; then
  214. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized-mysql-mailcow-1}
  215. else
  216. MYSQL_CONTAINER=${MYSQL_CONTAINER:-mailcowdockerized_mysql-mailcow_1}
  217. fi
  218. container_id=$(docker ps -f name="$MYSQL_CONTAINER" --format "{{.ID}}")
  219. if [ -z "$container_id" ]; then
  220. err "Couldn't find docker container named '$MYSQL_CONTAINER'."
  221. return 1
  222. fi
  223. export KEY_BACKUP_ID="mailcow"
  224. export MYSQL_ROOT_PASSWORD
  225. export MYSQL_CONTAINER
  226. export BACKUP_SERVER
  227. export DOMAIN
  228. wrap "Install rsync-backup on host" "
  229. cd /srv/charm-store/rsync-backup
  230. bash ./hooks/install.d/60-install.sh
  231. " || return 1
  232. wrap "Mysql dump install" "
  233. cd /srv/charm-store/mariadb
  234. bash ./hooks/install.d/60-backup.sh
  235. " || return 1
  236. ## Using https://github.com/mailcow/mailcow-dockerized/blob/master/helper-scripts/backup_and_restore.sh
237. for elt in vmail{,-attachments-vol} crypt redis rspamd postfix; do
  238. mirror-dir:check-add-vol "$elt" || return 1
  239. done
  240. mirror-dir:check-add "$mailcow_root" || return 1
  241. mirror-dir:check-add "/var/backups/mysql" || return 1
  242. mirror-dir:check-add "/etc" || return 1
  243. dest="$BACKUP_SERVER"
  244. dest="${dest%/*}"
  245. ssh_options=()
  246. if [[ "$dest" == *":"* ]]; then
  247. port="${dest##*:}"
  248. dest="${dest%%:*}"
  249. ssh_options=(-p "$port")
  250. else
  251. port=""
  252. dest="${dest%%:*}"
  253. fi
  254. info "You can run this following command from an host having admin access to $dest:"
  255. echo " (Or send it to a backup admin of $dest)" >&2
  256. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$(cat /var/lib/rsync/.ssh/id_rsa.pub)'"
  257. }
  258. compose:has_domain() {
  259. local compose_file="$1" host="$2" name conf relation relation_value domain server_aliases
  260. while read-0 name conf ; do
  261. name=$(e "$name" | shyaml get-value)
  262. if [[ "$name" =~ ^[a-zA-Z0-9-]+\.[a-zA-Z0-9-]+ ]]; then
  263. [ "$host" == "$name" ] && return 0
  264. fi
  265. rel=$(e "$conf" | shyaml -y get-value relations 2>/dev/null) || continue
  266. for relation in web-proxy publish-dir; do
  267. relation_value=$(e "$rel" | shyaml -y get-value "$relation" 2>/dev/null) || continue
  268. while read-0 label conf_relation; do
  269. domain=$(e "$conf_relation" | shyaml get-value "domain" 2>/dev/null) && {
  270. [ "$host" == "$domain" ] && return 0
  271. }
  272. server_aliases=$(e "$conf_relation" | shyaml get-values "server-aliases" 2>/dev/null) && {
  273. [[ $'\n'"$server_aliases" == *$'\n'"$host"$'\n'* ]] && return 0
  274. }
  275. done < <(e "$relation_value" | shyaml -y key-values-0)
  276. done
  277. done < <(shyaml -y key-values-0 < "$compose_file")
  278. return 1
  279. }
  280. compose:install-backup() {
  281. local BACKUP_SERVER="$1" service_name="$2" compose_file="$3" ignore_ping_check="$4" ignore_domain_check="$5"
282. ## XXXvlab: far from perfect as it mimics and depends on the internal
283. ## logic of the current default way to get a domain in compose-core
  284. host=$(hostname)
  285. if ! compose:has_domain "$compose_file" "$host"; then
  286. if [ -n "$ignore_domain_check" ]; then
  287. warn "domain of '$host' not found in compose file '$compose_file'. Ignoring due to ``--ignore-domain-check`` option."
  288. else
  289. err "domain of '$host' not found in compose file '$compose_file'. Use ``--ignore-domain-check`` to ignore check."
  290. return 1
  291. fi
  292. fi
  293. ping_check "$host" || return 1
  294. if [ -e "/root/.ssh/rsync_rsa" ]; then
  295. warn "deleting private key in /root/.ssh/rsync_rsa, has we are not using it anymore."
  296. rm -fv /root/.ssh/rsync_rsa
  297. fi
  298. if [ -e "/root/.ssh/rsync_rsa.pub" ]; then
  299. warn "deleting public key in /root/.ssh/rsync_rsa.pub, has we are not using it anymore."
  300. rm -fv /root/.ssh/rsync_rsa.pub
  301. fi
  302. if service_cfg=$(cat "$compose_file" |
  303. shyaml get-value -y "$service_name" 2>/dev/null); then
  304. info "Entry for service ${DARKYELLOW}$service_name${NORMAL}" \
  305. "is already present in '$compose_file'."
  306. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  307. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  308. "entry in '$compose_file'."
  309. return 1
  310. }
  311. private_key=$(e "$cfg" | shyaml get-value private-key) || return 1
  312. target=$(e "$cfg" | shyaml get-value target) || return 1
  313. if [ "$target" != "$BACKUP_SERVER" ]; then
  314. err "Existing backup target '$target' is different" \
  315. "from specified '$BACKUP_SERVER'"
  316. return 1
  317. fi
  318. else
  319. private_key=$(ssh:mk-private-key "$host" "$service_name")
  320. cat <<EOF >> "$compose_file"
  321. $service_name:
  322. options:
  323. ident: $host
  324. target: $BACKUP_SERVER
  325. private-key: |
  326. $(e "$private_key" | sed -r 's/^/ /g')
  327. EOF
  328. fi
  329. dest="$BACKUP_SERVER"
  330. dest="${dest%/*}"
  331. ssh_options=()
  332. if [[ "$dest" == *":"* ]]; then
  333. port="${dest##*:}"
  334. dest="${dest%%:*}"
  335. ssh_options=(-p "$port")
  336. else
  337. port=""
  338. dest="${dest%%:*}"
  339. fi
  340. info "You can run this following command from an host having admin access to $dest:"
  341. echo " (Or send it to a backup admin of $dest)" >&2
342. ## We remove the ending label (the label may or may not be present in the
343. ## private key, and thus here, depending on the version of
344. ## openssh-client)
  345. public_key=$(ssh-keygen -y -f <(e "$private_key"$'\n') | sed -r 's/ [^ ]+@[^ ]+$//')
  346. echo "ssh ${ssh_options[@]} myadmin@$dest ssh-key add '$public_key compose@$host'"
  347. }
  348. backup-action() {
  349. local action="$1"
  350. shift
  351. vps_type=$(vps:get-type) || {
  352. err "Failed to get type of installation."
  353. return 1
  354. }
  355. if ! fn.exists "${vps_type}:${action}"; then
  356. err "type '${vps_type}' has no ${vps_type}:${action} implemented yet."
  357. return 1
  358. fi
  359. "${vps_type}:${action}" "$@"
  360. }
  361. compose:get_default_backup_host_ident() {
  362. local service_name="$1" ## Optional
  363. local compose_file service_cfg cfg target
  364. compose_file=$(compose:get-compose-yml)
  365. service_name="${service_name:-rsync-backup}"
  366. if ! service_cfg=$(cat "$compose_file" |
  367. shyaml get-value -y "$service_name" 2>/dev/null); then
  368. err "No service named '$service_name' found in 'compose.yml'."
  369. return 1
  370. fi
  371. cfg=$(e "$service_cfg" | shyaml get-value -y options) || {
  372. err "No ${WHITE}options${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  373. "entry in '$compose_file'."
  374. return 1
  375. }
  376. if ! target=$(e "$cfg" | shyaml get-value target); then
  377. err "No ${WHITE}options.target${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  378. "entry in '$compose_file'."
  379. fi
  384. if ! ident=$(e "$cfg" | shyaml get-value ident); then
  385. err "No ${WHITE}options.ident${NORMAL} in ${DARKYELLOW}$service_name${NORMAL}'s" \
  386. "entry in '$compose_file'."
  387. fi
  388. echo "$target $ident"
  389. }
  390. mailcow:get_default_backup_host_ident() {
  391. local content cron_line ident found dest cmd_line
  392. if ! [ -e "/etc/cron.d/mirror-dir" ]; then
  393. err "No '/etc/cron.d/mirror-dir' found."
  394. return 1
  395. fi
  396. content=$(cat /etc/cron.d/mirror-dir) || {
  397. err "Can't read '/etc/cron.d/mirror-dir'."
  398. return 1
  399. }
  400. if ! cron_line=$(e "$content" | grep "mirror-dir backup"); then
  401. err "Can't find 'mirror-dir backup' line in '/etc/cron.d/mirror-dir'."
  402. return 1
  403. fi
  404. cron_line=${cron_line%|*}
  405. cmd_line=(${cron_line#*root})
  406. found=
  407. dest=
  408. for arg in "${cmd_line[@]}"; do
  409. [ -n "$found" ] && {
  410. dest="$arg"
  411. break
  412. }
  413. [ "$arg" == "-d" ] && {
  414. found=1
  415. }
  416. done
  417. if ! [[ "$dest" =~ ^[\'\"a-zA-Z0-9:/.-]+$ ]]; then
  418. err "Can't find valid destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  419. return 1
  420. fi
  421. if [[ "$dest" == \"*\" ]] || [[ "$dest" == \'*\' ]]; then
  422. ## unquoting, the eval should be safe because of previous check
  423. dest=$(eval e "$dest")
  424. fi
  425. if [ -z "$dest" ]; then
  426. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  427. return 1
  428. fi
  429. ## looking for ident
  430. found=
  431. ident=
  432. for arg in "${cmd_line[@]}"; do
  433. [ -n "$found" ] && {
  434. ident="$arg"
  435. break
  436. }
  437. [ "$arg" == "-h" ] && {
  438. found=1
  439. }
  440. done
  441. if ! [[ "$ident" =~ ^[\'\"a-zA-Z0-9.-]+$ ]]; then
  442. err "Can't find valid identifier in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  443. return 1
  444. fi
  445. if [[ "$ident" == \"*\" ]] || [[ "$ident" == \'*\' ]]; then
  446. ## unquoting, the eval should be safe because of previous check
  447. ident=$(eval e "$ident")
  448. fi
  449. if [ -z "$ident" ]; then
  450. err "Can't find destination in 'mirror-dir backup' arguments from '/etc/cron.d/mirror-dir'."
  451. return 1
  452. fi
  453. echo "$dest $ident"
  454. }
  455. compose:service:containers() {
  456. local project="$1" service="$2"
  457. docker ps \
  458. --filter label="com.docker.compose.project=$project" \
  459. --filter label="compose.master-service=$service" \
  460. --format="{{.ID}}"
  461. }
  462. export -f compose:service:containers
  463. compose:service:container_one() {
  464. local project="$1" service="$2" container_id
  465. {
  466. read-0a container_id || {
  467. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  468. return 1
  469. }
  470. if read-0a _; then
  471. err "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  472. return 1
  473. fi
  474. } < <(compose:service:containers "$project" "$service")
  475. echo "$container_id"
  476. }
  477. export -f compose:service:container_one
  478. compose:service:container_first() {
  479. local project="$1" service="$2" container_id
  480. {
  481. read-0a container_id || {
  482. err "service ${DARKYELLOW}$service${NORMAL} has no running container."
  483. return 1
  484. }
  485. if read-0a _; then
  486. warn "service ${DARKYELLOW}$service${NORMAL} has more than one running container."
  487. fi
  488. } < <(compose:service:containers "$project" "$service")
  489. echo "$container_id"
  490. }
  491. export -f compose:service:container_first
  492. docker:running_containers() {
  493. :cache: scope=session
  494. docker ps --format="{{.ID}}"
  495. }
  496. decorator._mangle_fn docker:running_containers
  497. export -f docker:running_containers
  498. compose:project:containers() {
  499. local project="$1" opts
  500. opts+=(--filter label="com.docker.compose.project=$project")
  501. docker ps "${opts[@]}" \
  502. --format="{{.ID}}"
  503. }
  504. export -f compose:project:containers
  505. compose:charm:containers() {
  506. local project="$1" charm="$2"
  507. docker ps \
  508. --filter label="com.docker.compose.project=$project" \
  509. --filter label="compose.charm=$charm" \
  510. --format="{{.ID}}"
  511. }
  512. export -f compose:charm:containers
  513. compose:charm:container_one() {
  514. local project="$1" charm="$2" container_id
  515. {
  516. read-0a container_id || {
  517. err "charm ${DARKPINK}$charm${NORMAL} has no running container in project '$project'."
  518. return 1
  519. }
  520. if read-0a _; then
  521. err "charm ${DARKPINK}$charm${NORMAL} has more than one running container."
  522. return 1
  523. fi
  524. } < <(compose:charm:containers "$project" "$charm")
  525. echo "$container_id"
  526. }
  527. export -f compose:charm:container_one
  528. compose:charm:container_first() {
  529. local project="$1" charm="$2" container_id
  530. {
  531. read-0a container_id || {
  532. warn "charm ${DARKYELLOW}$charm${NORMAL} has no running container in project '$project'."
  533. }
  534. if read-0a _; then
  535. warn "charm ${DARKYELLOW}$charm${NORMAL} has more than one running container."
  536. fi
  537. } < <(compose:charm:containers "$project" "$charm")
  538. echo "$container_id"
  539. }
  540. export -f compose:charm:container_first
  541. compose:get_url() {
  542. local project_name="$1" service="$2" data_file network ip
  543. data_file="/var/lib/compose/relations/${project_name}/${service}-frontend/web-proxy/data"
  544. if [ -e "$data_file" ]; then
  545. (
  546. set -o pipefail
  547. cat "$data_file" | shyaml get-value url
  548. )
  549. else
550. ## Assume there is no frontend relation here; the url is the direct IP
  551. container_id=$(compose:service:container_one "${project_name}" "${service}") || return 1
  552. network_ip=$(docker:container:network_ip_one "${container_id}") || return 1
  553. IFS=":" read -r network ip <<<"$network_ip"
  554. tcp_port=
  555. for port in $(docker:exposed_ports "$container_id"); do
  556. IFS="/" read port type <<<"$port"
  557. [ "$type" == "tcp" ] || continue
  558. tcp_port="$port"
  559. break
  560. done
  561. echo -n "http://$ip"
  562. [ -n "$tcp_port" ] && echo ":$tcp_port"
  563. fi || {
  564. err "Failed querying ${service} to frontend relation to get url."
  565. return 1
  566. }
  567. }
  568. export -f compose:get_url
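## Example (service name illustrative): resolve the public url of a
## service, either from its web-proxy relation data or, failing that,
## from the container's IP and first exposed tcp port:
##
##     url=$(compose:get_url "$project_name" "cyclos") || return 1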
  569. compose:container:service() {
  570. local container="$1" service
  571. if ! service=$(docker:container:label "$container" "compose.service"); then
  572. err "Failed to get service name from container ${container}."
  573. return 1
  574. fi
  575. if [ -z "$service" ]; then
  576. err "No service found for container ${container}."
  577. return 1
  578. fi
  579. echo "$service"
  580. }
  581. export -f compose:container:service
  582. compose:psql() {
  583. local project_name="$1" dbname="$2" container_id
  584. shift 2
  585. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  586. docker exec -i "${container_id}" psql -U postgres "$dbname" "$@"
  587. }
  588. export -f compose:psql
  589. compose:mongo() {
  590. local project_name="$1" dbname="$2" container_id
  591. container_id=$(compose:charm:container_one "$project_name" "mongo") || return 1
  592. docker exec -i "${container_id}" mongo --quiet "$dbname"
  593. }
  594. export -f compose:mongo
  595. compose:pgm() {
  596. local project_name="$1" container_network_ip container_ip container_network
  597. shift
  598. container_id=$(compose:charm:container_one "$project_name" "postgres") || return 1
  599. service_name=$(compose:container:service "$container_id") || return 1
  600. image_id=$(docker:container:image "$container_id") || return 1
  601. container_network_ip=$(docker:container:network_ip_one "$container_id") || return 1
  602. IFS=":" read -r container_network container_ip <<<"$container_network_ip"
  603. pgpass="/srv/datastore/data/${service_name}/var/lib/postgresql/data/pgpass"
  604. local final_pgm_docker_run_opts+=(
  605. -u 0 -e prefix_pg_local_command=" "
  606. --network "${container_network}"
  607. -e PGHOST="$container_ip"
  608. -e PGUSER=postgres
  609. -v "$pgpass:/root/.pgpass"
  610. "${pgm_docker_run_opts[@]}"
  611. )
  612. cmd=(docker run --rm \
  613. "${final_pgm_docker_run_opts[@]}" \
  614. "${image_id}" pgm "$@"
  615. )
  616. echo "${cmd[@]}"
  617. "${cmd[@]}"
  618. }
  619. export -f compose:pgm
  620. postgres:dump() {
  621. local project_name="$1" src="$2" dst="$3"
  622. (
  623. settmpdir PGM_TMP_LOCATION
  624. pgm_docker_run_opts=('-v' "${PGM_TMP_LOCATION}:/tmp/dump")
  625. compose:pgm "$project_name" cp -f "$src" "/tmp/dump/dump.gz" &&
  626. mv "$PGM_TMP_LOCATION/dump.gz" "$dst"
  627. ) || return 1
  628. }
  629. export -f postgres:dump
  630. postgres:restore() {
  631. local project_name="$1" src="$2" dst="$3"
  632. full_src_path=$(readlink -e "$src") || exit 1
  633. (
  634. pgm_docker_run_opts=('-v' "${full_src_path}:/tmp/dump.gz")
  635. compose:pgm "$project_name" cp -f "/tmp/dump.gz" "$dst"
  636. ) || return 1
  637. }
  638. export -f postgres:restore
  639. odoo:get_public_user_id() {
  640. local project_name="$1" dbname="$2"
  641. echo "select res_id from ir_model_data where model = 'res.users' and name = 'public_user';" |
  642. compose:psql "$project_name" "$dbname" -qAt
  643. }
  644. cyclos:set_root_url() {
  645. local project_name="$1" dbname="$2" url="$3"
  646. echo "UPDATE configurations SET root_url = '$url';" |
  647. compose:psql "$project_name" "$dbname" || {
  648. err "Failed to set cyclos url value in '$dbname' database."
  649. return 1
  650. }
  651. }
  652. export -f cyclos:set_root_url
  653. cyclos:unlock() {
  654. local project_name="$1" dbname="$2"
  655. echo "delete from database_lock;" |
  656. compose:psql "${project_name}" "${dbname}"
  657. }
  658. export -f cyclos:unlock
  659. rocketchat:drop-indexes() {
  660. local project_name="$1" dbname="$2"
  661. echo "db.users.dropIndexes()" |
  662. compose:mongo "${project_name}" "${dbname}"
  663. }
  664. export -f rocketchat:drop-indexes
  665. compose:project_name() {
  666. if [ -z "$PROJECT_NAME" ]; then
  667. PROJECT_NAME=$(compose --get-project-name) || {
  668. err "Couldn't get project name."
  669. return 1
  670. }
  671. if [ -z "$PROJECT_NAME" -o "$PROJECT_NAME" == "orphan" ]; then
  672. err "Couldn't get project name, probably because 'compose.yml' wasn't found."
  673. echo " Please ensure to either configure a global 'compose.yml' or run this command" >&2
  674. echo " in a compose project (with 'compose.yml' on the top level directory)." >&2
  675. return 1
  676. fi
  677. export PROJECT_NAME
  678. fi
  679. echo "$PROJECT_NAME"
  680. }
  681. export -f compose:project_name
  682. compose:get_cron_docker_cmd() {
  683. local cron_line cmd_line docker_cmd
  684. project_name=$(compose:project_name) || return 1
  685. if ! cron_line=$(docker exec "${project_name}"_cron_1 cat /etc/cron.d/rsync-backup | grep "\* \* \*"); then
  686. err "Can't find cron_line in cron container."
  687. echo " Have you forgotten to run 'compose up' ?" >&2
  688. return 1
  689. fi
  690. cron_line=${cron_line%|*}
  691. cron_line=${cron_line%"2>&1"*}
  692. cmd_line="${cron_line#*root}"
  693. eval "args=($cmd_line)"
  694. ## should be last argument
  695. docker_cmd=$(echo ${args[@]: -1})
  696. if ! [[ "$docker_cmd" == "docker run --rm -e "* ]]; then
  697. echo "docker command found should start with 'docker run'." >&2
  698. echo "Here's command:" >&2
  699. echo " $docker_cmd" >&2
  700. return 1
  701. fi
  702. e "$docker_cmd"
  703. }
  704. compose:recover-target() {
  705. local backup_host="$1" ident="$2" src="$3" dst="$4" service_name="${5:-rsync-backup}" project_name
  706. project_name=$(compose:project_name) || return 1
  707. docker_image="${project_name}_${service_name}"
  708. if ! docker_has_image "$docker_image"; then
  709. compose build "${service_name}" || {
  710. err "Couldn't find nor build image for service '$service_name'."
  711. return 1
  712. }
  713. fi
  714. dst="${dst%/}" ## remove final slash
  715. ssh_options=(-o StrictHostKeyChecking=no)
  716. if [[ "$backup_host" == *":"* ]]; then
  717. port="${backup_host##*:}"
  718. backup_host="${backup_host%%:*}"
  719. ssh_options+=(-p "$port")
  720. else
  721. port=""
  722. backup_host="${backup_host%%:*}"
  723. fi
  724. rsync_opts=(
  725. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  726. -azvArH --delete --delete-excluded
  727. --partial --partial-dir .rsync-partial
  728. --numeric-ids
  729. )
  730. if [ "$DRY_RUN" ]; then
  731. rsync_opts+=("-n")
  732. fi
  733. cmd=(
  734. docker run --rm --entrypoint rsync \
  735. -v "/srv/datastore/config/${service_name}/var/lib/rsync":/var/lib/rsync \
  736. -v "${dst%/*}":/mnt/dest \
  737. "$docker_image" \
  738. "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "/mnt/dest/${dst##*/}"
  739. )
  740. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  741. "${cmd[@]}"
  742. }
  743. mailcow:recover-target() {
  744. local backup_host="$1" ident="$2" src="$3" dst="$4"
  745. dst="${dst%/}" ## remove final slash
  746. ssh_options=(-o StrictHostKeyChecking=no)
  747. if [[ "$backup_host" == *":"* ]]; then
  748. port="${backup_host##*:}"
  749. backup_host="${backup_host%%:*}"
  750. ssh_options+=(-p "$port")
  751. else
  752. port=""
  753. backup_host="${backup_host%%:*}"
  754. fi
  755. rsync_opts=(
  756. -e "ssh ${ssh_options[*]} -i /var/lib/rsync/.ssh/id_rsa -l rsync"
  757. -azvArH --delete --delete-excluded
  758. --partial --partial-dir .rsync-partial
  759. --numeric-ids
  760. )
  761. if [ "$DRY_RUN" ]; then
  762. rsync_opts+=("-n")
  763. fi
  764. cmd=(
  765. rsync "${rsync_opts[@]}" "$backup_host":"/var/mirror/$ident/$src" "${dst}"
  766. )
  767. echo "${WHITE}Launching: ${NORMAL} ${cmd[@]}"
  768. "${cmd[@]}"
  769. }
  770. nextcloud:src:version() {
  771. local version
  772. if ! version=$(cat "/srv/datastore/data/${nextcloud_service}/var/www/html/version.php" 2>/dev/null); then
  773. err "Can't find version.php file to get last version installed."
  774. exit 1
  775. fi
  776. version=$(e "$version" | grep 'VersionString =' | cut -f 3 -d ' ' | cut -f 2 -d "'")
  777. if [ -z "$version" ]; then
  778. err "Can't figure out version from version.php content."
  779. exit 1
  780. fi
  781. echo "$version"
  782. }
  783. container:health:check-fix:container-aliveness() {
  784. local container_id="$1"
  785. timeout 5s docker inspect "$container_id" >/dev/null 2>&1
  786. errlvl=$?
  787. if [ "$errlvl" == 124 ]; then
  788. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  789. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  790. pid=$(ps ax -o pid,command -ww | grep docker-containerd-shim |
  791. grep "/$container_id" |
  792. sed -r 's/^ *//g' |
  793. cut -f 1 -d " ")
  794. if [ -z "$pid" ]; then
  795. err "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command. Can't find its PID neither."
  796. return 1
  797. fi
  798. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} doesn't answer to 'inspect' command (pid: $pid)."
  799. Wrap -d "kill pid $pid and restart" <<EOF
  800. kill "$pid"
  801. sleep 2
  802. docker restart "$container_id"
  803. EOF
  804. fi
  805. return $errlvl
  806. }
  807. container:health:check-fix:no-matching-entries() {
  808. local container_id="$1"
  809. out=$(docker exec "$container_id" echo 2>&1)
  810. errlvl=$?
  811. [ "$errlvl" == 0 ] && return 0
  812. service_name=$(docker ps --filter id="$container_id" --format '{{.Label "com.docker.compose.service"}}')
  813. container_name=($(docker ps --filter id="$container_id" --format '{{.Names}}'))
  814. if [ "$errlvl" == 126 ] && [[ "$out" == *"no matching entries in passwd file"* ]]; then
  815. echo "container ${DARKCYAN}${container_name[0]}${NORMAL} for ${DARKYELLOW}$service_name${NORMAL} has ${DARKRED}no-matching-entries${NORMAL} bug." >&2
  816. Wrap -d "restarting container of ${DARKYELLOW}$service_name${NORMAL} twice" <<EOF
  817. docker restart "$container_id"
  818. sleep 2
  819. docker restart "$container_id"
  820. EOF
  821. return $errlvl
  822. fi
  823. warn "Unknown issue with ${DARKYELLOW}$service_name${NORMAL}'s container:"
  824. echo " ${WHITE}cmd:${NORMAL} docker exec -ti $container_id echo" >&2
  825. echo "$out" | prefix " ${DARKGRAY}|${NORMAL} " >&2
  826. echo " ${DARKGRAY}..${NORMAL} leaving this as-is."
  827. return $errlvl
  828. }
  829. docker:api() {
  830. local endpoint="$1"
  831. curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
  832. }
  833. docker:containers:id() {
  834. docker:api /containers/json | jq -r ".[] | .Id"
  835. }
  836. docker:containers:names() {
  837. docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
  838. }
  839. docker:container:stats() {
  840. container="$1"
  841. docker:api "/containers/$container/stats?stream=false"
  842. }
  843. docker:containers:stats() {
  844. :cache: scope=session
  845. local jobs='' line container id_names sha names name data service project
  846. local DC="com.docker.compose"
  847. local PSF_values=(
  848. ".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
  849. )
  850. local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
  851. id_names=$(docker ps -a --format="$PSF") || return 1
  852. ## Create a docker container table from name/sha to service, project, image_name
  853. declare -A resolve
  854. while read-0a line; do
  855. sha=${line%% *}; line=${line#* }
  856. names=${line%% *}; line=${line#* }
  857. names=(${names//,/ })
  858. for name in "${names[@]}"; do
  859. resolve["$name"]="$line"
  860. done
  861. resolve["$sha"]="$line"
  862. done < <(printf "%s\n" "$id_names")
  863. declare -A data
  864. while read-0a line; do
  865. name=${line%% *}; line=${line#* }
  866. ts=${line%% *}; line=${line#* }
  867. resolved="${resolve["$name"]}"
  868. project=${resolved%% *}; resolved=${resolved#* }
  869. service=${resolved%% *}; resolved=${resolved#* }
  870. image_name="$resolved"
  871. if [ -z "$service" ]; then
  872. project="@"
  873. service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
  874. service=${service//\//_}
  875. fi
  876. if [ -n "${data["$project/$service"]}" ]; then
  877. previous=(${data["$project/$service"]})
  878. previous=(${previous[@]:1})
  879. current=($line)
  880. sum=()
  881. i=0; max=${#previous[@]}
  882. while (( i < max )); do
  883. sum+=($((${previous[$i]} + ${current[$i]})))
  884. ((i++))
  885. done
  886. data["$project/$service"]="$ts ${sum[*]}"
  887. else
  888. data["$project/$service"]="$ts $line"
  889. fi
  890. done < <(
  891. for container in "$@"; do
  892. (
  893. docker:container:stats "${container}" |
  894. jq -r '
  895. (.name | ltrimstr("/"))
  896. + " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
  897. + " " + (.memory_stats.usage | tostring)
  898. + " " + (.memory_stats.stats.inactive_file | tostring)
  899. + " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
  900. + " " + (.memory_stats.limit | tostring)
  901. + " " + (.networks.eth0.rx_bytes | tostring)
  902. + " " + (.networks.eth0.rx_packets | tostring)
  903. + " " + (.networks.eth0.rx_errors | tostring)
  904. + " " + (.networks.eth0.rx_dropped | tostring)
  905. + " " + (.networks.eth0.tx_bytes | tostring)
  906. + " " + (.networks.eth0.tx_packets | tostring)
  907. + " " + (.networks.eth0.tx_errors | tostring)
  908. + " " + (.networks.eth0.tx_dropped | tostring)
  909. '
  910. ) &
  911. jobs=1
  912. done
  913. [ -n "$jobs" ] && wait
  914. )
  915. for label in "${!data[@]}"; do
  916. echo "$label ${data[$label]}"
  917. done
  918. }
  919. decorator._mangle_fn docker:containers:stats
  920. export -f docker:containers:stats
  921. col:normalize:size() {
  922. local alignment=$1
  923. awk -v alignment="$alignment" '{
  924. # Store the entire line in the lines array.
  925. lines[NR] = $0;
  926. # Split the line into fields.
  927. split($0, fields);
  928. # Update max for each field.
  929. for (i = 1; i <= length(fields); i++) {
  930. if (length(fields[i]) > max[i]) {
  931. max[i] = length(fields[i]);
  932. }
  933. }
  934. }
  935. END {
  936. # Print lines with fields padded to max.
  937. for (i = 1; i <= NR; i++) {
  938. split(lines[i], fields);
  939. line = "";
  940. for (j = 1; j <= length(fields); j++) {
  941. # Get alignment for the current field.
  942. align = substr(alignment, j, 1);
  943. if (align != "+") {
  944. align = "-"; # Default to left alignment if not "+".
  945. }
  946. line = line sprintf("%" align max[j] "s ", fields[j]);
  947. }
  948. print line;
  949. }
  950. }'
  951. }
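## Example (toy input): pad every column to its maximum width; "-" keeps
## a column left-aligned, "+" right-aligns it (roughly, trailing spaces
## aside):
##
##     printf "%s\n" "foo 12" "barbaz 3" | col:normalize:size "-+"
##     ##  foo    12
##     ##  barbaz  3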
  952. rrd:create() {
  953. local prefix="$1"
  954. shift
  955. local label="$1" step="300" src_def
  956. shift
  957. if [ -z "$VAR_DIR" ]; then
  958. err "Unset \$VAR_DIR, can't create rrd graph"
  959. return 1
  960. fi
  961. mkdir -p "$VAR_DIR"
  962. if ! [ -d "$VAR_DIR" ]; then
  963. err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
  964. return 1
  965. fi
  966. if ! type -p rrdtool >/dev/null 2>&1; then
  967. apt-get install rrdtool -y --force-yes </dev/null
968. if ! type -p rrdtool >/dev/null 2>&1; then
  969. err "Couldn't find nor install 'rrdtool'."
  970. return 1
  971. fi
  972. fi
  973. local RRD_PATH="$VAR_DIR/rrd"
  974. local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
  975. mkdir -p "${RRD_FILE%/*}"
  976. if [ -f "$RRD_FILE" ]; then
  977. err "File '$RRD_FILE' already exists, use a different label."
  978. return 1
  979. fi
  980. local rrd_ds_opts=()
  981. for src_def in "$@"; do
  982. IFS=":" read -r name type min max rra_types <<<"$src_def"
  983. rra_types=${rra_types:-average,max,min}
  984. rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
  985. done
  986. local step=120
987. local times=( ## with a step of 120s, one datapoint spans 2mn
  988. 2m:1w
  989. 6m:3w
  990. 30m:12w
  991. 3h:1y
  992. 1d:10y
  993. 1w:2080w
  994. )
  995. rrd_rra_opts=()
  996. for time in "${times[@]}"; do
  997. rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
  998. done
  999. cmd=(
  1000. rrdtool create "$RRD_FILE" \
  1001. --step "$step" \
  1002. "${rrd_ds_opts[@]}" \
  1003. "${rrd_rra_opts[@]}"
  1004. )
  1005. "${cmd[@]}" || {
  1006. err "Failed command: ${cmd[@]}"
  1007. return 1
  1008. }
  1009. }
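## Example invocation (names and DS specs are illustrative): create
## "$VAR_DIR/rrd/myproject/mem.rrd" with two gauge data sources, using
## the default average/max/min RRAs defined above:
##
##     rrd:create "myproject" "mem" \
##         "usage:GAUGE:0:U" "limit:GAUGE:0:U"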
  1010. rrd:update() {
  1011. local prefix="$1"
  1012. shift
  1013. while read-0a data; do
  1014. [ -z "$data" ] && continue
  1015. IFS="~" read -ra data <<<"${data// /\~}"
  1016. label="${data[0]}"
  1017. ts="${data[1]}"
  1018. for arg in "$@"; do
  1019. IFS="|" read -r name arg <<<"$arg"
  1020. rrd_label="${label}/${name}"
  1021. rrd_create_opt=()
  1022. rrd_update_opt="$ts"
  1023. for col_def in ${arg//,/ }; do
  1024. col=${col_def%%:*}; create_def=${col_def#*:}
  1025. rrd_update_opt="${rrd_update_opt}:${data[$col]}"
  1026. rrd_create_opt+=("$create_def")
  1027. done
  1028. local RRD_ROOT_PATH="$VAR_DIR/rrd"
  1029. local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
  1030. local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
  1031. if ! [ -f "$RRD_FILE" ]; then
  1032. info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
  1033. if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
  1034. err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
  1035. return 1
  1036. fi
  1037. fi
  1038. rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
  1039. err "update failed with options: '$rrd_update_opt'"
  1040. return 1
  1041. }
  1042. done
  1043. done
  1044. }
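## Example wiring (illustrative): each input line is "label ts v1 v2 ...";
## a spec of the form "name|COL:DS_DEF,..." maps positional columns to the
## data sources of an RRD file "<label>/<name>" under "$VAR_DIR/rrd/<prefix>/":
##
##     docker:containers:stats $(docker:running_containers) |
##         rrd:update "containers" "mem|2:usage:GAUGE:0:U,3:inactive:GAUGE:0:U"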
  1045. [ "$SOURCED" ] && return 0
  1046. ##
  1047. ## Command line processing
  1048. ##
  1049. cmdline.spec.gnu
  1050. cmdline.spec.reporting
  1051. cmdline.spec.gnu install
  1052. cmdline.spec::cmd:install:run() {
  1053. :
  1054. }
  1055. cmdline.spec.gnu get-type
  1056. cmdline.spec::cmd:get-type:run() {
  1057. vps:get-type
  1058. }
  1059. cmdline.spec:install:cmd:backup:run() {
  1060. : :posarg: BACKUP_SERVER 'Target backup server'
1061. : :optfla: --ignore-domain-check \
1062. "Allow bypassing the domain check in the
1063. compose file (only used in compose
1064. installations)."
1065. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1066. local vps_type
  1067. vps_type=$(vps:get-type) || {
  1068. err "Failed to get type of installation."
  1069. return 1
  1070. }
  1071. if ! fn.exists "${vps_type}:install-backup"; then
  1072. err "type '${vps_type}' has no backup installation implemented yet."
  1073. return 1
  1074. fi
  1075. opts=()
  1076. [ "$opt_ignore_ping_check" ] &&
  1077. opts+=("--ignore-ping-check")
  1078. if [ "$vps_type" == "compose" ]; then
  1079. [ "$opt_ignore_domain_check" ] &&
  1080. opts+=("--ignore-domain-check")
  1081. fi
  1082. "cmdline.spec:install:cmd:${vps_type}-backup:run" "${opts[@]}" "$BACKUP_SERVER"
  1083. }
  1084. DEFAULT_BACKUP_SERVICE_NAME=rsync-backup
  1085. cmdline.spec.gnu compose-backup
  1086. cmdline.spec:install:cmd:compose-backup:run() {
  1087. : :posarg: BACKUP_SERVER 'Target backup server'
  1088. : :optval: --service-name,-s "YAML service name in compose
  1089. file to check for existence of key.
  1090. Defaults to '$DEFAULT_BACKUP_SERVICE_NAME'"
  1091. : :optval: --compose-file,-f "Compose file location. Defaults to
  1092. the value of '\$DEFAULT_COMPOSE_FILE'"
1093. : :optfla: --ignore-domain-check \
1094. "Allow bypassing the domain check in the
1095. compose file."
1096. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1097. local service_name compose_file
  1098. [ -e "/etc/compose/local.conf" ] && source /etc/compose/local.conf
  1099. compose_file=${opt_compose_file:-$DEFAULT_COMPOSE_FILE}
  1100. service_name=${opt_service_name:-$DEFAULT_BACKUP_SERVICE_NAME}
  1101. if ! [ -e "$compose_file" ]; then
  1102. err "Compose file not found in '$compose_file'."
  1103. return 1
  1104. fi
  1105. compose:install-backup "$BACKUP_SERVER" "$service_name" "$compose_file" \
  1106. "$opt_ignore_ping_check" "$opt_ignore_domain_check"
  1107. }
  1108. cmdline.spec:install:cmd:mailcow-backup:run() {
  1109. : :posarg: BACKUP_SERVER 'Target backup server'
1110. : :optfla: --ignore-ping-check "Allow bypassing the ping check of the host."
  1111. "mailcow:install-backup" "$BACKUP_SERVER" "$opt_ignore_ping_check"
  1112. }
  1113. cmdline.spec.gnu backup
  1114. cmdline.spec::cmd:backup:run() {
  1115. local vps_type
  1116. vps_type=$(vps:get-type) || {
  1117. err "Failed to get type of installation."
  1118. return 1
  1119. }
  1120. if ! fn.exists "cmdline.spec:backup:cmd:${vps_type}:run"; then
  1121. err "type '${vps_type}' has no backup process implemented yet."
  1122. return 1
  1123. fi
  1124. "cmdline.spec:backup:cmd:${vps_type}:run"
  1125. }
  1126. cmdline.spec:backup:cmd:mailcow:run() {
  1127. local cmd_line cron_line cmd
  1128. for f in mysql-backup mirror-dir; do
  1129. [ -e "/etc/cron.d/$f" ] || {
  1130. err "Can't find '/etc/cron.d/$f'."
  1131. echo " Have you forgotten to run 'vps install backup BACKUP_HOST' ?" >&2
  1132. return 1
  1133. }
  1134. if ! cron_line=$(cat "/etc/cron.d/$f" |
  1135. grep -v "^#" | grep "\* \* \*"); then
  1136. err "Can't find cron_line in '/etc/cron.d/$f'." \
  1137. "Have you modified it ?"
  1138. return 1
  1139. fi
  1140. cron_line=${cron_line%|*}
  1141. cmd_line=(${cron_line#*root})
  1142. if [ "$f" == "mirror-dir" ]; then
  1143. cmd=()
  1144. for arg in "${cmd_line[@]}"; do
  1145. [ "$arg" != "-q" ] && cmd+=("$arg")
  1146. done
  1147. else
  1148. cmd=("${cmd_line[@]}")
  1149. fi
  1150. code="${cmd[*]}"
  1151. echo "${WHITE}Launching:${NORMAL} ${code}"
  1152. {
  1153. {
  1154. (
  1155. ## Some commands are using colors that are already
  1156. ## set by this current program and will trickle
  1157. ## down unwantedly
  1158. ansi_color no
  1159. eval "${code}"
  1160. ) | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1161. set_errlvl "${PIPESTATUS[0]}"
  1162. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1163. set_errlvl "${PIPESTATUS[0]}"
  1164. } 3>&1 1>&2 2>&3
  1165. if [ "$?" != "0" ]; then
  1166. err "Failed."
  1167. return 1
  1168. fi
  1169. done
  1170. info "Mysql backup and subsequent mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1171. }
  1172. set_errlvl() { return "${1:-1}"; }
  1173. cmdline.spec:backup:cmd:compose:run() {
  1174. local cron_line args
  1175. project_name=$(compose:project_name) || return 1
  1176. docker_cmd=$(compose:get_cron_docker_cmd) || return 1
  1177. echo "${WHITE}Launching:${NORMAL} docker exec -i "${project_name}_cron_1" $docker_cmd"
  1178. {
  1179. {
  1180. eval "docker exec -i \"${project_name}_cron_1\" $docker_cmd" | sed -r "s/^/ ${GRAY}|${NORMAL} /g"
  1181. set_errlvl "${PIPESTATUS[0]}"
  1182. } 3>&1 1>&2 2>&3 | sed -r "s/^/ $DARKRED\!$NORMAL /g"
  1183. set_errlvl "${PIPESTATUS[0]}"
  1184. } 3>&1 1>&2 2>&3
  1185. if [ "$?" != "0" ]; then
  1186. err "Failed."
  1187. return 1
  1188. fi
  1189. info "mirror-dir ${DARKGREEN}succeeded${NORMAL}."
  1190. }
  1191. cmdline.spec.gnu recover-target
  1192. cmdline.spec::cmd:recover-target:run() {
  1193. : :posarg: BACKUP_DIR 'Source directory on backup side'
  1194. : :posarg: HOST_DIR 'Target directory on host side'
  1195. : :optval: --backup-host,-B "The backup host"
  1196. : :optfla: --dry-run,-n "Don't do anything, instead tell what it
  1197. would do."
  1198. ## if no backup host take the one by default
  1199. backup_host="$opt_backup_host"
  1200. if [ -z "$backup_host" ]; then
  1201. backup_host_ident=$(backup-action get_default_backup_host_ident) || return 1
  1202. read -r backup_host ident <<<"$backup_host_ident"
  1203. fi
  1204. if [[ "$BACKUP_DIR" == /* ]]; then
  1205. err "BACKUP_DIR must be a relative path from the root of your backup."
  1206. return 1
  1207. fi
  1208. REAL_HOST_DIR=$(realpath "$HOST_DIR") || {
  1209. err "Can't find HOST_DIR '$HOST_DIR'."
  1210. return 1
  1211. }
  1212. export DRY_RUN="${opt_dry_run}"
  1213. backup-action recover-target "$backup_host" "$ident" "$BACKUP_DIR" "$REAL_HOST_DIR"
  1214. }
  1215. cmdline.spec.gnu odoo
  1216. cmdline.spec::cmd:odoo:run() {
  1217. :
  1218. }
  1219. cmdline.spec.gnu restart
  1220. cmdline.spec:odoo:cmd:restart:run() {
  1221. : :optval: --service,-s "The service (defaults to 'odoo')"
  1222. local out odoo_service
  1223. odoo_service="${opt_service:-odoo}"
  1224. project_name=$(compose:project_name) || return 1
  1225. if ! out=$(docker restart "${project_name}_${odoo_service}_1" 2>&1); then
  1226. if [[ "$out" == *"no matching entries in passwd file" ]]; then
  1227. warn "Catched docker bug. Restarting once more."
  1228. if ! out=$(docker restart "${project_name}_${odoo_service}_1"); then
  1229. err "Can't restart container ${project_name}_${odoo_service}_1 (restarted twice)."
  1230. echo " output:" >&2
  1231. echo "$out" | prefix " ${GRAY}|${NORMAL} " >&2
  1232. exit 1
  1233. fi
  1234. else
  1235. err "Couldn't restart container ${project_name}_${odoo_service}_1 (and no restart bug detected)."
  1236. exit 1
  1237. fi
  1238. fi
  1239. info "Container ${project_name}_${odoo_service}_1 was ${DARKGREEN}successfully${NORMAL} restarted."
  1240. }
  1241. cmdline.spec.gnu restore
  1242. cmdline.spec:odoo:cmd:restore:run() {
  1243. : :posarg: ZIP_DUMP_LOCATION 'Source odoo dump file to restore
  1244. (can be a local file or an url)'
  1245. : :optval: --service,-s "The service (defaults to 'odoo')"
  1246. : :optval: --database,-d 'Target database (default if not specified)'
  1247. local out
  1248. odoo_service="${opt_service:-odoo}"
  1249. if [[ "$ZIP_DUMP_LOCATION" == "http://"* ]] ||
  1250. [[ "$ZIP_DUMP_LOCATION" == "https://"* ]]; then
  1251. settmpdir ZIP_TMP_LOCATION
  1252. tmp_location="$ZIP_TMP_LOCATION/dump.zip"
  1253. curl -k -s -L "$ZIP_DUMP_LOCATION" > "$tmp_location" || {
  1254. err "Couldn't get '$ZIP_DUMP_LOCATION'."
  1255. exit 1
  1256. }
  1257. if [[ "$(dd if="$tmp_location" count=2 bs=1 2>/dev/null)" != "PK" ]]; then
  1258. err "Download doesn't seem to be a zip file."
  1259. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1260. exit 1
  1261. fi
  1262. info "Successfully downloaded '$ZIP_DUMP_LOCATION'"
  1263. echo " in '$tmp_location'." >&2
  1264. ZIP_DUMP_LOCATION="$tmp_location"
  1265. fi
  1266. [ -e "$ZIP_DUMP_LOCATION" ] || {
  1267. err "No file '$ZIP_DUMP_LOCATION' found." >&2
  1268. exit 1
  1269. }
  1270. #cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1271. msg_dbname=default
  1272. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1273. compose --no-hooks drop "$odoo_service" $opt_database || {
  1274. err "Error dropping $msg_dbname database of service ${DARKYELLOW}$odoo_service${NORMAL}."
  1275. exit 1
  1276. }
  1277. compose --no-hooks load "$odoo_service" $opt_database < "$ZIP_DUMP_LOCATION" || {
  1278. err "Error restoring service ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
  1279. exit 1
  1280. }
  1281. info "Successfully restored ${DARKYELLOW}$odoo_service${NORMAL} to $msg_dbname database."
  1282. ## Restart odoo, ensure there is no bugs lingering on it.
  1283. cmdline.spec:odoo:cmd:restart:run --service "$odoo_service" || exit 1
  1284. }
  1285. cmdline.spec.gnu dump
  1286. cmdline.spec:odoo:cmd:dump:run() {
  1287. : :posarg: DUMP_ZIPFILE 'Target path to store odoo dump zip file.'
  1288. : :optval: --database,-d 'Target database (default if not specified)'
  1289. : :optval: --service,-s "The service (defaults to 'odoo')"
  1290. odoo_service="${opt_service:-odoo}"
  1291. msg_dbname=default
  1292. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1293. compose --no-hooks save "$odoo_service" $opt_database > "$DUMP_ZIPFILE" || {
  1294. err "Error dumping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1295. exit 1
  1296. }
  1297. info "Successfully dumped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database to '$DUMP_ZIPFILE'."
  1298. }
  1299. cmdline.spec.gnu drop
  1300. cmdline.spec:odoo:cmd:drop:run() {
  1301. : :optval: --database,-d 'Target database (default if not specified)'
  1302. : :optval: --service,-s "The service (defaults to 'odoo')"
  1303. odoo_service="${opt_service:-odoo}"
  1304. msg_dbname=default
  1305. [ -n "$opt_database" ] && msg_dbname="'$opt_database'"
  1306. compose --no-hooks drop "$odoo_service" $opt_database || {
  1307. err "Error dropping ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1308. exit 1
  1309. }
  1310. info "Successfully dropped ${DARKYELLOW}$odoo_service${NORMAL}'s $msg_dbname database."
  1311. }
  1312. cmdline.spec.gnu set-cyclos-url
  1313. cmdline.spec:odoo:cmd:set-cyclos-url:run() {
  1314. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1315. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1316. local URL
  1317. dbname=${opt_database:-odoo}
  1318. cyclos_service="${opt_service:-cyclos}"
  1319. project_name=$(compose:project_name) || exit 1
  1320. URL=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1321. Wrap -d "set cyclos url to '$URL'" <<EOF || exit 1
  1322. echo "UPDATE res_company SET cyclos_server_url = '$URL/api' WHERE id=1;" |
  1323. compose:psql "$project_name" "$dbname" || {
  1324. err "Failed to set cyclos url value in '$dbname' database."
  1325. exit 1
  1326. }
  1327. EOF
  1328. }
  1329. cmdline.spec.gnu fix-sso
  1330. cmdline.spec:odoo:cmd:fix-sso:run() {
  1331. : :optval: --database,-d "Target database ('odoo' if not specified)"
  1332. local public_user_id project_name dbname
  1333. dbname=${opt_database:-odoo}
  1334. project_name=$(compose:project_name) || exit 1
  1335. public_user_id=$(odoo:get_public_user_id "${project_name}" "${dbname}") || exit 1
  1336. Wrap -d "fix website's object to 'public_user' (id=$public_user_id)" <<EOF || exit 1
  1337. echo "UPDATE website SET user_id = $public_user_id;" |
  1338. compose:psql "$project_name" "$dbname" || {
  1339. err "Failed to set website's object user_id to public user's id ($public_user_id) in '$dbname' database."
  1340. exit 1
  1341. }
  1342. EOF
  1343. }
  1344. cmdline.spec.gnu cyclos
  1345. cmdline.spec::cmd:cyclos:run() {
  1346. :
  1347. }
  1348. cmdline.spec:cyclos:cmd:dump:run() {
  1349. : :posarg: DUMP_GZFILE 'Target path to store odoo dump gz file.'
  1350. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1351. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1352. cyclos_service="${opt_service:-cyclos}"
  1353. cyclos_database="${opt_database:-cyclos}"
  1354. project_name=$(compose:project_name) || exit 1
  1355. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1356. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1357. docker stop "$container_id" || exit 1
  1358. Wrap -d "Dump postgres database '${cyclos_database}'." -- \
  1359. postgres:dump "${project_name}" "$cyclos_database" "$DUMP_GZFILE" || exit 1
  1360. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1361. docker start "${container_id}" || exit 1
  1362. }
  1363. cmdline.spec.gnu restore
  1364. cmdline.spec:cyclos:cmd:restore:run() {
  1365. : :posarg: GZ_DUMP_LOCATION 'Source cyclos dump file to restore
  1366. (can be a local file or an url)'
  1367. : :optval: --service,-s "The service (defaults to 'cyclos')"
  1368. : :optval: --database,-d 'Target database (default if not specified)'
  1369. local out
  1370. cyclos_service="${opt_service:-cyclos}"
  1371. cyclos_database="${opt_database:-cyclos}"
  1372. project_name=$(compose:project_name) || exit 1
  1373. url=$(compose:get_url "${project_name}" "${cyclos_service}") || return 1
  1374. container_id=$(compose:service:container_one "$project_name" "${cyclos_service}") || exit 1
  1375. if [[ "$GZ_DUMP_LOCATION" == "http://"* ]] ||
  1376. [[ "$GZ_DUMP_LOCATION" == "https://"* ]]; then
  1377. settmpdir GZ_TMP_LOCATION
  1378. tmp_location="$GZ_TMP_LOCATION/dump.gz"
  1379. Wrap -d "get '$GZ_DUMP_LOCATION'" <<EOF || exit 1
1380. ## Note that curl versions before 7.76.0 do not have
  1381. curl -k -s -L "$GZ_DUMP_LOCATION" --fail \\
  1382. > "$tmp_location" || {
  1383. echo "Error fetching ressource. Is url correct ?" >&2
  1384. exit 1
  1385. }
  1386. if [[ "\$(dd if="$tmp_location" count=2 bs=1 2>/dev/null |
  1387. hexdump -v -e "/1 \"%02x\"")" != "1f8b" ]]; then
  1388. err "Download doesn't seem to be a gzip file."
  1389. dd if="$tmp_location" count=1 bs=256 | hd | prefix " ${GRAY}|${NORMAL} " >&2
  1390. exit 1
  1391. fi
  1392. EOF
  1393. GZ_DUMP_LOCATION="$tmp_location"
  1394. fi
  1395. [ -e "$GZ_DUMP_LOCATION" ] || {
  1396. err "No file '$GZ_DUMP_LOCATION' found." >&2
  1397. exit 1
  1398. }
  1399. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1400. docker stop "$container_id" || exit 1
  1401. ## XXXvlab: making the assumption that the postgres username should
  1402. ## be the same as the cyclos service selected (which is the default,
  1403. ## but not always the case).
  1404. Wrap -d "restore postgres database '${cyclos_database}'." -- \
  1405. postgres:restore "$project_name" "$GZ_DUMP_LOCATION" "${cyclos_service}@${cyclos_database}" || exit 1
  1406. ## ensure that the database is not locked
  1407. Wrap -d "check and remove database lock if any" -- \
  1408. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1409. Wrap -d "set root url to '$url'" -- \
  1410. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1411. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1412. docker start "${container_id}" || exit 1
  1413. }
  1414. cmdline.spec.gnu set-root-url
  1415. cmdline.spec:cyclos:cmd:set-root-url:run() {
  1416. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1417. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1418. local URL
  1419. cyclos_database=${opt_database:-cyclos}
  1420. cyclos_service="${opt_service:-cyclos}"
  1421. project_name=$(compose:project_name) || exit 1
  1422. url=$(compose:get_url "${project_name}" "${cyclos_service}") || exit 1
  1423. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1424. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1425. docker stop "$container_id" || exit 1
  1426. Wrap -d "set root url to '$url'" -- \
  1427. cyclos:set_root_url "${project_name}" "${cyclos_database}" "${url}" || exit 1
  1428. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1429. docker start "${container_id}" || exit 1
  1430. }
  1431. cmdline.spec.gnu unlock
  1432. cmdline.spec:cyclos:cmd:unlock:run() {
  1433. : :optval: --database,-d "Target database ('cyclos' if not specified)"
  1434. : :optval: --service,-s "The cyclos service name (defaults to 'cyclos')"
  1435. local URL
  1436. cyclos_database=${opt_database:-cyclos}
  1437. cyclos_service="${opt_service:-cyclos}"
  1438. project_name=$(compose:project_name) || exit 1
  1439. container_id=$(compose:service:container_one "${project_name}" "${cyclos_service}") || exit 1
  1440. Wrap -d "stop ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1441. docker stop "$container_id" || exit 1
  1442. Wrap -d "check and remove database lock if any" -- \
  1443. cyclos:unlock "${project_name}" "${cyclos_database}" || exit 1
  1444. Wrap -d "start ${DARKYELLOW}${cyclos_service}${NORMAL}'s container" -- \
  1445. docker start "${container_id}" || exit 1
  1446. }
  1447. cmdline.spec.gnu rocketchat
  1448. cmdline.spec::cmd:rocketchat:run() {
  1449. :
  1450. }
  1451. cmdline.spec.gnu drop-indexes
  1452. cmdline.spec:rocketchat:cmd:drop-indexes:run() {
  1453. : :optval: --database,-d "Target database ('rocketchat' if not specified)"
  1454. : :optval: --service,-s "The rocketchat service name (defaults to 'rocketchat')"
  1455. local URL
  1456. rocketchat_database=${opt_database:-rocketchat}
  1457. rocketchat_service="${opt_service:-rocketchat}"
  1458. project_name=$(compose:project_name) || exit 1
  1459. container_id=$(compose:service:container_one "${project_name}" "${rocketchat_service}") || exit 1
  1460. Wrap -d "stop ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1461. docker stop "$container_id" || exit 1
  1462. errlvl=0
  1463. Wrap -d "drop indexes" -- \
  1464. rocketchat:drop-indexes "${project_name}" "${rocketchat_database}" || {
  1465. errlvl=1
  1466. errmsg="Failed to drop indexes"
  1467. }
  1468. Wrap -d "start ${DARKYELLOW}${rocketchat_service}${NORMAL}'s container" -- \
  1469. docker start "${container_id}" || exit 1
  1470. if [ "$errlvl" != 0 ]; then
  1471. err "$errmsg"
  1472. fi
  1473. exit "$errlvl"
  1474. }
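## Example usage (same 'vps' command-name assumption):
##
##   vps rocketchat drop-indexes
##
## Rocket.Chat recreates the indexes it needs on startup, so dropping them and
## restarting the container is a way to recover from index-related upgrade
## issues.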
  1475. cmdline.spec.gnu nextcloud
  1476. cmdline.spec::cmd:nextcloud:run() {
  1477. :
  1478. }
  1479. cmdline.spec.gnu upgrade
  1480. cmdline.spec:nextcloud:cmd:upgrade:run() {
1481. : :posarg: [TARGET_VERSION] "Target Nextcloud version to upgrade to"
1482. : :optval: --service,-s "The nextcloud service name (defaults to 'nextcloud')"
  1483. local URL
  1484. nextcloud_service="${opt_service:-nextcloud}"
  1485. project_name=$(compose:project_name) || exit 1
  1486. containers=$(compose:service:containers "${project_name}" "${nextcloud_service}") || exit 1
  1487. container_stopped=()
  1488. if [ -n "$containers" ]; then
  1489. for container in $containers; do
  1490. Wrap -d "stop ${DARKYELLOW}${nextcloud_service}${NORMAL}'s container" -- \
  1491. docker stop "$container" || {
  1492. err "Failed to stop container '$container'."
  1493. exit 1
  1494. }
  1495. container_stopped+=("$container")
  1496. done
  1497. fi
  1498. before_version=$(nextcloud:src:version) || exit 1
  1499. ## -q to remove the display of ``compose`` related information
  1500. ## like relation resolution.
  1501. ## --no-hint to remove the final hint about modifying your
  1502. ## ``compose.yml``.
  1503. compose -q upgrade "$nextcloud_service" --no-hint
  1504. errlvl="$?"
  1505. after_version=$(nextcloud:src:version)
  1506. if [ "$after_version" != "$before_version" ]; then
  1507. desc="update \`compose.yml\` to set ${DARKYELLOW}$nextcloud_service${NORMAL}'s "
  1508. desc+="docker image to actual code version ${WHITE}${after_version}${NORMAL}"
  1509. Wrap -d "$desc" -- \
  1510. compose:file:value-change \
  1511. "${nextcloud_service}.docker-compose.image" \
  1512. "docker.0k.io/nextcloud:${after_version}-myc" || exit 1
  1513. fi
  1514. if [ "$errlvl" == 0 ]; then
  1515. echo "${WHITE}Launching final compose${NORMAL}"
  1516. compose up || exit 1
  1517. fi
  1518. exit "$errlvl"
  1519. }
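## Example usage (same 'vps' command-name assumption; the version number is
## only illustrative):
##
##   vps nextcloud upgrade 28.0.2
##
## Note that, as written, TARGET_VERSION is declared but not forwarded to the
## 'compose upgrade' call above, which therefore picks the upgrade target on
## its own.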
  1520. cmdline.spec.gnu check-fix
  1521. cmdline.spec::cmd:check-fix:run() {
  1522. : :posarg: [SERVICES...] "Optional service to check"
  1523. : :optval: --check,-c "Specify a check or a list of checks separated by commas"
1524. : :optfla: --silent,-s "Don't output anything if everything goes well"
  1525. local project_name service_name containers container check
  1526. all_checks=$(declare -F |
  1527. egrep '^declare -fx? container:health:check-fix:[^ ]+$' |
  1528. cut -f 4 -d ":")
  1529. checks=(${opt_check//,/ })
  1530. for check in "${checks[@]}"; do
  1531. fn.exists container:health:check-fix:$check || {
  1532. err "check '$check' not found."
  1533. return 1
  1534. }
  1535. done
  1536. if [ "${#checks[*]}" == 0 ]; then
  1537. checks=($all_checks)
  1538. fi
  1539. ## XXXvlab: could make it parallel
  1540. project_name=$(compose:project_name) || exit 1
  1541. containers=($(compose:project:containers "${project_name}")) || exit 1
  1542. found=
  1543. for container in "${containers[@]}"; do
  1544. service_name=$(docker ps --filter id="$container" --format '{{.Label "com.docker.compose.service"}}')
  1545. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1546. [[ " ${SERVICES[*]} " == *" $service_name "* ]] || continue
  1547. fi
  1548. found=1
  1549. one_bad=
  1550. for check in "${checks[@]}"; do
  1551. if ! container:health:check-fix:"$check" "$container"; then
  1552. one_bad=1
  1553. fi
  1554. done
  1555. if [ -z "$opt_silent" ] && [ -z "$one_bad" ]; then
  1556. Elt "containers have been checked for ${DARKYELLOW}$service_name${NORMAL}"
  1557. Feedback
  1558. fi
  1559. done
  1560. if [ -z "$found" ]; then
  1561. if [ -z "$opt_silent" ]; then
  1562. if [ "${#SERVICES[@]}" -gt 0 ]; then
  1563. warn "No container for given services found in current project '$project_name'."
  1564. else
  1565. warn "No container found for current project '$project_name'."
  1566. fi
  1567. fi
  1568. return 1
  1569. fi
  1570. }
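## Example usage (same 'vps' command-name assumption; 'some-check' and
## 'myservice' below are placeholders):
##
##   vps check-fix                          ## all checks on all containers
##   vps check-fix -c some-check myservice  ## one check on one service
##
## Available checks are the 'container:health:check-fix:*' functions defined
## elsewhere in this script.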
  1571. awk:require() {
  1572. local require_at_least="$1" version already_installed
  1573. while true; do
  1574. if ! version=$(awk --version 2>/dev/null); then
  1575. version=""
  1576. else
  1577. version=${version%%,*}
  1578. version=${version##* }
  1579. fi
  1580. if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
  1581. if [ -z "$already_installed" ]; then
  1582. if [ -z "$version" ]; then
  1583. info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
  1584. else
  1585. info "Found gawk version '$version'. Updating 'gawk'..."
  1586. fi
  1587. apt-get install gawk -y </dev/null || {
  1588. err "Failed to install 'gawk'."
  1589. return 1
  1590. }
  1591. already_installed=true
  1592. else
  1593. if [ -z "$version" ]; then
  1594. err "No 'gawk' available even after having installed one"
  1595. else
  1596. err "'gawk' version '$version' is lower than required" \
  1597. "'$require_at_least' even after updating 'gawk'."
  1598. fi
  1599. return 1
  1600. fi
  1601. continue
  1602. fi
  1603. return 0
  1604. done
  1605. }
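## Example: make sure GNU awk is recent enough for the strftime()-based
## formatting used by the pretty printers below (this mirrors the existing
## calls in 'stats:*'):
##
##   awk:require 4.1.4 || return 1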
  1606. cmdline.spec.gnu stats
  1607. cmdline.spec::cmd:stats:run() {
  1608. : :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
  1609. : :optfla: --silent,-s "Shorthand for '--format silent'"
1610. : :optval: --resource,-r "Resource(s) to report, separated by commas"
  1611. local project_name service_name containers container check
  1612. if [[ -n "${opt_silent}" ]]; then
  1613. if [[ -n "${opt_format}" ]]; then
  1614. err "'--silent' conflict with option '--format'."
  1615. return 1
  1616. fi
  1617. opt_format=s
  1618. fi
  1619. opt_format="${opt_format:-pretty}"
  1620. case "${opt_format}" in
  1621. raw|r)
  1622. opt_format="raw"
  1623. :
  1624. ;;
  1625. silent|s)
  1626. opt_format="silent"
  1627. ;;
  1628. pretty|p)
  1629. opt_format="pretty"
  1630. awk:require 4.1.4 || return 1
  1631. ;;
  1632. *)
  1633. err "Invalid value '$opt_format' for option --format"
  1634. echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
  1635. return 1
  1636. esac
  1637. local resources=(c.{memory,network} load_avg)
  1638. if [ -n "${opt_resource}" ]; then
  1639. resources=(${opt_resource//,/ })
  1640. fi
  1641. local not_found=()
  1642. for resource in "${resources[@]}"; do
  1643. if ! fn.exists "stats:$resource"; then
  1644. not_found+=("$resource")
  1645. fi
  1646. done
  1647. if [[ "${#not_found[@]}" -gt 0 ]]; then
  1648. not_found_msg=$(printf "%s, " "${not_found[@]}")
  1649. not_found_msg=${not_found_msg%, }
  1650. err "Unsupported resource(s) provided: ${not_found_msg}"
  1651. echo " resource must be one-of:" >&2
  1652. declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
  1653. return 1
  1654. fi
  1655. :state-dir:
  1656. for resource in "${resources[@]}"; do
  1657. [ "$opt_format" == "pretty" ] && echo "${WHITE}$resource${NORMAL}:"
  1658. stats:"$resource" "$opt_format" 2>&1 | prefix " "
  1659. set_errlvl "${PIPESTATUS[0]}" || return 1
  1660. done
  1661. }
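## Example usage (same 'vps' command-name assumption):
##
##   vps stats                               ## pretty output, all resources
##   vps stats -r c.memory,load_avg -f raw   ## raw values, selected resources
##   vps stats --silent                      ## only feed the RRD databases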
  1662. stats:c.memory() {
  1663. local format="$1"
  1664. local out
  1665. container_to_check=($(docker:running_containers)) || exit 1
  1666. out=$(docker:containers:stats "${container_to_check[@]}")
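## The update spec appears to follow '<base>|<column>:<ds name>:<type>:<min>:<max>,...':
## columns 3 and 4 of the stats output feed the 'usage' and 'inactive' GAUGE
## data sources, with unbounded ('U') min and max.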
  1667. printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
  1668. return 1
  1669. }
  1670. case "${format:-p}" in
  1671. raw|r)
  1672. printf "%s\n" "$out" | cut -f 1-5 -d " "
  1673. ;;
  1674. pretty|p)
  1675. awk:require 4.1.4 || return 1
  1676. {
  1677. echo "container" "__total____" "buffered____" "resident____"
  1678. printf "%s\n" "$out" |
  1679. awk '
  1680. {
  1681. offset = strftime("%z", $2);
  1682. print $1, substr($0, index($0,$3));
  1683. }' | cut -f 1-4 -d " " |
  1684. numfmt --field 2-4 --to=iec-i --format=%8.1fB |
  1685. sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
  1686. sort
  1687. } | col:normalize:size -+++ |
  1688. sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
  1689. header:make
  1690. ;;
  1691. esac
  1692. }
  1693. stats:c.network() {
  1694. local format="$1"
  1695. local out
  1696. container_to_check=($(docker:running_containers)) || exit 1
  1697. out=$(docker:containers:stats "${container_to_check[@]}")
  1698. cols=(
  1699. {rx,tx}_{bytes,packets,errors,dropped}
  1700. )
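## Brace expansion yields the 8 counters in this order:
## rx_bytes rx_packets rx_errors rx_dropped tx_bytes tx_packets tx_errors tx_dropped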
  1701. idx=5 ## starting column idx for next fields
  1702. defs=()
  1703. for col in "${cols[@]}"; do
  1704. defs+=("$((idx++)):${col}:COUNTER:U:U")
  1705. done
  1706. OLDIFS="$IFS"
  1707. IFS="," defs="${defs[*]}"
  1708. IFS="$OLDIFS"
  1709. printf "%s\n" "$out" |
  1710. rrd:update "containers" \
  1711. "network|${defs}" || {
  1712. return 1
  1713. }
  1714. case "${format:-p}" in
  1715. raw|r)
  1716. printf "%s\n" "$out" | cut -f 1,2,7- -d " "
  1717. ;;
  1718. pretty|p)
  1719. awk:require 4.1.4 || return 1
  1720. {
  1721. echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
  1722. echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
  1723. printf "%s\n" "$out" |
  1724. awk '
  1725. {
  1726. offset = strftime("%z", $2);
  1727. print $1, substr($0, index($0,$7));
  1728. }' |
  1729. numfmt --field 2,6 --to=iec-i --format=%8.1fB |
  1730. numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
  1731. sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
  1732. sort
  1733. } | col:normalize:size -++++++++ |
  1734. sed -r '
  1735. s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
  1736. s/(\.[0-9]):([KMGTPE])/\1 \2/g;
  1737. s/ ([0-9]+)\.0:B/\1 /g;
  1738. s/ ([0-9]+)\.0:/\1 /g;
  1739. ' |
  1740. header:make 2
  1741. ;;
  1742. esac
  1743. }
  1744. header:make() {
  1745. local nb_line="${1:-1}"
  1746. local line
  1747. while ((nb_line-- > 0)); do
  1748. read-0a line
  1749. echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
  1750. done
  1751. cat
  1752. }
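## header:make consumes the first N records of its input (N defaults to 1),
## prints them in gray with the '_' placeholders turned into spaces, and
## passes the remaining input through unchanged. The '_' padding keeps header
## cells as single words so col:normalize:size can align them with the data
## columns.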
  1753. stats:load_avg() {
  1754. local format="$1"
  1755. local out
  1756. out=$(host:sys:load_avg)
  1757. printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
  1758. return 1
  1759. }
  1760. case "${format:-p}" in
  1761. raw|r)
  1762. printf "%s\n" "$out" | cut -f 2-5 -d " "
  1763. ;;
  1764. pretty|p)
  1765. {
  1766. echo "___1m" "___5m" "__15m"
  1767. printf "%s\n" "$out" | cut -f 3-5 -d " "
  1768. } | col:normalize:size +++ | header:make
  1769. ;;
  1770. esac
  1771. }
  1772. host:sys:load_avg() {
  1773. local uptime
  1774. uptime="$(uptime)"
  1775. uptime=${uptime##*: }
  1776. uptime=${uptime//,/}
  1777. printf "%s " "" "$(date +%s)" "$uptime"
  1778. }
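## Sample output (illustrative values): " 1712345678 0.42 0.35 0.30 "
## i.e. an empty leading field (presumably to mirror the '<name> <timestamp>
## <values...>' shape of the container stats), the epoch timestamp, then the
## 1/5/15 minute load averages parsed from 'uptime'.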
  1779. cmdline::parse "$@"