Browse Source

Merge pull request 'maj master' (#1) from 0k/0k-charms:master into master

Reviewed-on: https://git.myceliandre.fr/StephanSainleger/0k-charms/pulls/1
master
StephanSainleger 3 years ago
parent
commit
62a3948f5d
  1. 17
      apache/build/Dockerfile
  2. 4
      bitwarden/metadata.yml
  3. 12
      codimd/hooks/init
  4. 4
      cron/build/Dockerfile
  5. 8
      cron/hooks/init
  6. 20
      cron/hooks/pre_deploy
  7. 4
      cron/metadata.yml
  8. 2
      cyclos/hooks/init
  9. 63
      cyclos/hooks/pre_deploy
  10. 61
      cyclos/lib/common
  11. 6
      cyclos/metadata.yml
  12. BIN
      cyclos/src/init.sql.gz
  13. 14
      drone/metadata.yml
  14. 33
      etherpad/README.org
  15. 39
      etherpad/hooks/init
  16. 26
      etherpad/hooks/postgres_database-relation-joined
  17. 53
      etherpad/metadata.yml
  18. 3
      gitea/metadata.yml
  19. 47
      gogocarto/README.org
  20. 4
      gogocarto/hooks/init
  21. 2
      gogocarto/hooks/mongo_database-relation-joined
  22. 11
      gogocarto/hooks/publish_dir-relation-joined
  23. 44
      gogocarto/hooks/schedule_commands-relation-joined
  24. 40
      gogocarto/lib/common
  25. 5
      gogocarto/metadata.yml
  26. 12
      hedgedoc/hooks/init
  27. 0
      hedgedoc/hooks/postgres_database-relation-joined
  28. 22
      hedgedoc/hooks/web_proxy-relation-joined
  29. 4
      hedgedoc/metadata.yml
  30. 2
      logrotate/build/src/entrypoint.sh
  31. 19
      mariadb/build/Dockerfile
  32. 20
      mariadb/build/src/entrypoint.sh
  33. 48
      mariadb/hooks/init
  34. 102
      mariadb/hooks/install.d/60-backup.sh
  35. 3
      mariadb/hooks/schedule_command-relation-joined
  36. 4
      mariadb/metadata.yml
  37. 40
      mariadb/resources/bin/mysql-backup
  38. 27
      monujo/hooks/init
  39. 17
      monujo/metadata.yml
  40. 4
      mysql/hooks/install
  41. 13
      nextcloud/build/Dockerfile
  42. 14
      nextcloud/build/database-accept-dots.patch
  43. 2
      nextcloud/metadata.yml
  44. 50
      odoo-tecnativa/actions/install
  45. 18
      onlyoffice/hooks/init
  46. 3
      onlyoffice/hooks/nextcloud_app-relation-joined
  47. 34
      onlyoffice/hooks/postgres_database-relation-joined
  48. 36
      onlyoffice/metadata.yml
  49. 12
      peertube/build/Dockerfile
  50. 26
      peertube/build/dbname.patch
  51. 1
      peertube/hooks/init
  52. 2
      peertube/hooks/postgres_database-relation-joined
  53. 2
      postgres/metadata.yml
  54. 4
      precise/0k-odoo-light/hooks/install
  55. 2
      precise/apt-cacher/hooks/install
  56. 42
      precise/base-0k/hooks/install.d/00-base.sh
  57. 2
      precise/base-0k/hooks/install.d/05-shyaml.sh
  58. 6
      precise/base-0k/hooks/install.d/20-kal-scripts.sh
  59. 80
      precise/base-0k/hooks/install.d/30-customize.sh
  60. 2
      precise/ca/hooks/install
  61. 2
      precise/git/hooks/install
  62. 4
      precise/host/hooks/install.d/38-ntp.sh
  63. 3
      precise/host/hooks/install.d/39-logrotate.sh
  64. 5
      precise/host/hooks/install.d/40-btrfs.sh
  65. 45
      precise/host/hooks/install.d/50-lxc.sh
  66. 32
      precise/host/hooks/install.d/60-docker.sh
  67. 1
      precise/host/hooks/install.d/61-mirror-dir.sh
  68. 23
      precise/host/hooks/install.d/70-0k.sh
  69. 23
      precise/host/hooks/install.d/75-fail2ban.sh
  70. 19
      precise/host/hooks/install.d/80-dns-waterfall.sh
  71. 245
      precise/host/hooks/install.d/90-shorewall.sh
  72. 37
      precise/host/hooks/install.d/95-checks.sh
  73. 74
      precise/host/hooks/install.d/96-backup-lxc.sh
  74. 2
      precise/mirror/hooks/install
  75. 2
      precise/pypi-cacher/hooks/install
  76. 2
      precise/svn/hooks/install
  77. 8
      precise/vpn/hooks/install
  78. 60
      rocketchat/README.org
  79. 4
      rocketchat/metadata.yml
  80. 125
      rsync-backup-target/README.org
  81. 10
      rsync-backup-target/build/Dockerfile
  82. 42
      rsync-backup-target/build/entrypoint.sh
  83. 7
      rsync-backup-target/build/src/etc/sudoers.d/recover
  84. 3
      rsync-backup-target/build/src/etc/sudoers.d/rsync
  85. 76
      rsync-backup-target/build/src/usr/local/sbin/request-recovery-key
  86. 106
      rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate
  87. 66
      rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate
  88. 152
      rsync-backup-target/build/src/usr/local/sbin/ssh-key
  89. 97
      rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate
  90. 68
      rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys
  91. 65
      rsync-backup-target/hooks/init
  92. 83
      rsync-backup-target/hooks/log_rotate-relation-joined
  93. 12
      rsync-backup-target/metadata.yml
  94. 40
      rsync-backup-target/resources/bin/compose-add-rsync-key
  95. 2
      rsync-backup/build/Dockerfile
  96. 38
      rsync-backup/hooks/install.d/60-install.sh
  97. 1
      rsync-backup/hooks/schedule_command-relation-joined
  98. 2
      rsync-backup/metadata.yml
  99. 630
      rsync-backup/resources/bin/mirror-dir
  100. 12
      searx/hooks/web_proxy-relation-joined

17
apache/build/Dockerfile

@ -1,7 +1,7 @@
## copy of 'php:7.3-apache' image ## copy of 'php:7.3-apache' image
## XXXvlab: will need to move towards separate apache and php, so that nginx can ## XXXvlab: will need to move towards separate apache and php, so that nginx can
## replace apache in all conf. ## replace apache in all conf.
FROM docker.0k.io/php:7.3-apache-1
FROM docker.0k.io/php:7.4.13-apache
## Limesurvey ## Limesurvey
# RUN apt-get update && \ # RUN apt-get update && \
@ -20,7 +20,7 @@ FROM docker.0k.io/php:7.3-apache-1
## Framadate ## Framadate
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y libicu-dev libpq-dev libxml2-dev && \
DEBIAN_FRONTEND=noninteractive apt-get install -y libonig-dev libicu-dev libpq-dev libxml2-dev && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* && \ rm -rf /var/lib/apt/lists/* && \
docker-php-ext-install mbstring intl xml pdo_pgsql docker-php-ext-install mbstring intl xml pdo_pgsql
@ -30,7 +30,7 @@ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y libexif-dev libexif12 libfreetype6-dev libjpeg62-turbo-dev libpng-dev && \ DEBIAN_FRONTEND=noninteractive apt-get install -y libexif-dev libexif12 libfreetype6-dev libjpeg62-turbo-dev libpng-dev && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* && \ rm -rf /var/lib/apt/lists/* && \
docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \
docker-php-ext-configure gd --with-freetype=/usr/include/ --with-jpeg=/usr/include/ && \
docker-php-ext-install exif gd pdo_mysql mysqli docker-php-ext-install exif gd pdo_mysql mysqli
## gogocarto ## gogocarto
@ -39,13 +39,13 @@ RUN apt-get update && \
libbz2-dev libc-client-dev libcurl4-openssl-dev libfreetype6-dev \ libbz2-dev libc-client-dev libcurl4-openssl-dev libfreetype6-dev \
libgd-dev libicu-dev libkrb5-dev libmagickcore-dev libmagickwand-dev \ libgd-dev libicu-dev libkrb5-dev libmagickcore-dev libmagickwand-dev \
libmcrypt-dev libmemcached-dev libtidy-dev libxml2-dev libxslt-dev \ libmcrypt-dev libmemcached-dev libtidy-dev libxml2-dev libxslt-dev \
libz-dev libzip-dev" && \
libz-dev libzip-dev libonig-dev" && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \
bzip2 cron g++ gettext git gnupg imagemagick libfreetype6 libgd3 \ bzip2 cron g++ gettext git gnupg imagemagick libfreetype6 libgd3 \
libmcrypt4 libmemcached11 libmemcachedutil2 libsodium23 libtidy5deb1 \ libmcrypt4 libmemcached11 libmemcachedutil2 libsodium23 libtidy5deb1 \
libxml2 libxslt1.1 libzip4 nano openssl unzip ${BUILD_PACKAGES} && \ libxml2 libxslt1.1 libzip4 nano openssl unzip ${BUILD_PACKAGES} && \
docker-php-ext-configure gd --with-freetype-dir=/usr/include/ \
--with-jpeg-dir=/usr/include/ && \
docker-php-ext-configure gd --with-freetype=/usr/include/ \
--with-jpeg=/usr/include/ && \
docker-php-ext-configure imap --with-kerberos --with-imap-ssl && \ docker-php-ext-configure imap --with-kerberos --with-imap-ssl && \
docker-php-ext-configure hash --with-mhash && \ docker-php-ext-configure hash --with-mhash && \
docker-php-ext-install bcmath bz2 calendar dba curl exif gd gettext \ docker-php-ext-install bcmath bz2 calendar dba curl exif gd gettext \
@ -63,14 +63,11 @@ RUN apt-get update && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*
## XXXvlab: could load these in 'entrypoint.sh' to be more dynamic ## XXXvlab: could load these in 'entrypoint.sh' to be more dynamic
RUN a2enmod headers proxy_http rewrite ssl proxy_wstunnel http2 proxy_connect RUN a2enmod headers proxy_http rewrite ssl proxy_wstunnel http2 proxy_connect
## Can remove this when SSL certificate are all valid ones ## Can remove this when SSL certificate are all valid ones
RUN apt-get update && apt-get install -y --force-yes ssl-cert
RUN apt-get update && apt-get install -y ssl-cert
COPY entrypoint.sh /entrypoint.sh COPY entrypoint.sh /entrypoint.sh

4
bitwarden/metadata.yml

@ -1,6 +1,6 @@
description: Bitwarden Server description: Bitwarden Server
#docker-image: bitwardenrs/server:1.17.0-alpine
docker-image: docker.0k.io/bitwarden:1.17.0
#docker-image: bitwardenrs/server:1.22.2-alpine
docker-image: docker.0k.io/bitwarden:1.22.2
data-resources: data-resources:
- /data - /data
uses: uses:

12
codimd/hooks/init

@ -1,12 +0,0 @@
#!/bin/bash
init-config-add "\
$SERVICE_NAME:
environment:
CMD_USECDN: \"false\"
"
## ``codimd`` create uploads folder with wrong permission
mkdir -p "$SERVICE_DATASTORE/home/hackmd/app/public/uploads"
chown -R 1500:1500 "$SERVICE_DATASTORE/home/hackmd/app/public/"

4
cron/build/Dockerfile

@ -1,7 +1,7 @@
FROM debian:jessie
FROM docker.0k.io/debian:jessie
RUN apt-get update && \ RUN apt-get update && \
DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes cron moreutils && \
DEBIAN_FRONTEND=noninteractive apt-get install -y cron moreutils && \
apt-get clean && \ apt-get clean && \
rm -rf /var/lib/apt/lists/* rm -rf /var/lib/apt/lists/*

8
cron/hooks/init

@ -19,6 +19,12 @@ fi
exit 1 exit 1
} }
[ "$HOST_COMPOSE_LAUNCHER_OPTS" ] || {
err "\$HOST_COMPOSE_LAUNCHER_OPTS is expected to be set."
exit 1
}
[ "$COMPOSE_LAUNCHER_BIN" ] || { [ "$COMPOSE_LAUNCHER_BIN" ] || {
err "\$COMPOSE_LAUNCHER_BIN is expected to be set." err "\$COMPOSE_LAUNCHER_BIN is expected to be set."
exit 1 exit 1
@ -31,7 +37,7 @@ $CHARM_NAME:
- /etc/timezone:/etc/timezone:ro - /etc/timezone:/etc/timezone:ro
- \$DOCKER_COMPOSE_PATH/bin/dc:/bin/dc:ro - \$DOCKER_COMPOSE_PATH/bin/dc:/bin/dc:ro
- $COMPOSE_LAUNCHER_BIN:/bin/compose:ro - $COMPOSE_LAUNCHER_BIN:/bin/compose:ro
- $COMPOSE_LAUNCHER_OPTS:$COMPOSE_LAUNCHER_OPTS:ro
- $HOST_COMPOSE_LAUNCHER_OPTS:$COMPOSE_LAUNCHER_OPTS:ro
environment: environment:
TZ: $timezone TZ: $timezone
COMPOSE_LAUNCHER_OPTS: $COMPOSE_LAUNCHER_OPTS COMPOSE_LAUNCHER_OPTS: $COMPOSE_LAUNCHER_OPTS

20
cron/hooks/pre_deploy

@ -0,0 +1,20 @@
#!/bin/bash
## Should be executable N time in a row with same result.
set -e
cron_config_hash() {
debug "Adding config hash to enable recreating upon config change."
config_hash=$({
find "$SERVICE_CONFIGSTORE/etc/cron"{,.hourly,.weekly,.daily,.monthly} \
-type f -exec md5sum {} \;
} | md5_compat) || exit 1
init-config-add "
$MASTER_BASE_SERVICE_NAME:
labels:
- compose.config_hash=$config_hash
"
}
cron_config_hash || exit 1

4
cron/metadata.yml

@ -11,7 +11,9 @@ data-resources:
host-resources: host-resources:
- /var/run/docker.sock - /var/run/docker.sock
provides: provides:
schedule-command:
schedule-command: ## for one command
tech-dep: False
schedule-commands: ## for several commands
tech-dep: False tech-dep: False
uses: ## optional uses: ## optional

2
cyclos/hooks/init

@ -40,7 +40,7 @@ done < <(array_values_to_stdin PROPS)
init-config-add " init-config-add "
$SERVICE_NAME: $SERVICE_NAME:
environment: environment:
JAVA_OPTS: |
JAVA_OPTS: >-
-Dcyclos.cors.origin=* -Dcyclos.cors.origin=*
-Dcyclos.header.remoteAddress=X-Forwarded-For -Dcyclos.header.remoteAddress=X-Forwarded-For
-Dcyclos.header.protocol=X-Forwarded-Proto -Dcyclos.header.protocol=X-Forwarded-Proto

63
cyclos/hooks/pre_deploy

@ -0,0 +1,63 @@
#!/bin/bash
##
## Get domain in option of relation "web-proxy"
##
## XXXvlab: there is a tiny lapse of time where database is not yet
## installed, and admin password is the default value.
. lib/common
set -ex
admin_password=$(options-get admin-password 2>/dev/null ) || exit 1
CONTROL_PASSWORD_FILE="$SERVICE_DATASTORE/.control-pass"
## Was it already properly propagated to database ?
control_password=$(H "${admin_password}")
if ! [ -e "$CONTROL_PASSWORD_FILE" ] || [ "$control_password" != "$(cat "$CONTROL_PASSWORD_FILE")" ]; then
hash="$(htpasswd -nbBC 10 USER "$admin_password" | cut -f 2- -d :)" || {
err "Couldn't generate hash for admin password."
exit 1
}
if ! sql < <(e "
UPDATE passwords SET value = '$hash'
WHERE user_id = 1
AND status = 'ACTIVE'
AND password_type_id in (
SELECT id FROM password_types
WHERE input_method = 'TEXT_BOX'
AND password_mode = 'MANUAL');
"); then
debug "Failed to set password for admin users."
exit 1
fi
mkdir -p "${CONTROL_PASSWORD_FILE%/*}"
e "$control_password" > "$CONTROL_PASSWORD_FILE"
fi
url=$(named-relation-get "web-proxy" url) || exit 1
CONTROL_URL_FILE="$SERVICE_DATASTORE/.control-url"
## Was it already properly propagated to database ?
control_url=$(H "${url}")
if ! [ -e "$CONTROL_URL_FILE" ] || [ "$control_url" != "$(cat "$CONTROL_URL_FILE")" ]; then
## In ``configurations`` table, columns login_url, logout_url, root_url
if ! sql < <(e "
UPDATE configurations
SET
root_url = '$url'
"); then
debug "Failed to set password for admin users."
exit 1
fi
e "$control_password" > "$CONTROL_URL_FILE"
fi

61
cyclos/lib/common

@ -1,4 +1,4 @@
# -*- mode: bash -*-
# -*- mode: shell-script -*-
CYCLOS_CONFIG="/usr/local/cyclos/WEB-INF/classes/cyclos.properties" CYCLOS_CONFIG="/usr/local/cyclos/WEB-INF/classes/cyclos.properties"
@ -12,3 +12,62 @@ cyclos:build() {
docker cp "$container_id":"$CYCLOS_CONFIG" "$HOST_CYCLOS_CONFIG" && docker cp "$container_id":"$CYCLOS_CONFIG" "$HOST_CYCLOS_CONFIG" &&
docker rm "$container_id" docker rm "$container_id"
} }
named-relation-get-target-service() {
local relation="$1" ts
if ! read-0 ts _ _ < <(get_service_relation "$SERVICE_NAME" "$relation"); then
err "Couldn't find relation ${DARKCYAN}$relation${NORMAL}."
return 1
fi
e "$ts"
}
## XXXvlab: Consider for inclusion in compose-core
relation-get-config() {
local relation="$1" ts relation_dir
ts=$(named-relation-get-target-service "$relation") || return 1
relation_dir=$(get_relation_data_dir "$SERVICE_NAME" "$ts" "$relation") || return 1
cat "${relation_dir}/data"
}
named-relation-get() {
local relation="$1" key="$2" config
config=$(relation-get-config "$relation") || return 1
e "$config" | shyaml get-value "$key" || {
err "Couldn't get ${WHITE}$key${NORMAL} value" \
"in ${DARKCYAN}$relation${NORMAL} relation's data."
exit 1
}
}
sql() {
(
DBNAME="$(named-relation-get "postgres-database" dbname)" || exit 1
ts=$(named-relation-get-target-service "postgres-database") || exit 1
export SERVICE_NAME="$ts"
export SERVICE_DATASTORE="$DATASTORE/$SERVICE_NAME"
DOCKER_BASE_IMAGE=$(service_base_docker_image "$SERVICE_NAME")
export DOCKER_BASE_IMAGE
target_charm=$(get_service_charm "$ts") || exit 1
target_charm_path=$(charm.get_dir "$target_charm") || exit 1
set +e
. "$target_charm_path/lib/common"
set -e
ensure_db_docker_running
ddb "$DBNAME"
)
}

6
cyclos/metadata.yml

@ -1,5 +1,5 @@
## Based on cyclos/cyclos:4.13.2
docker-image: docker.0k.io/cyclos:4.13.2
## Based on cyclos/cyclos:4.14
docker-image: docker.0k.io/cyclos:4.14.7
config-resources: config-resources:
- /usr/local/cyclos/WEB-INF/classes/cyclos.properties - /usr/local/cyclos/WEB-INF/classes/cyclos.properties
data-resources: data-resources:
@ -24,6 +24,8 @@ uses:
- earthdistance - earthdistance
- postgis - postgis
- unaccent - unaccent
init-sql: !bash-stdout |
zcat "$BASE_CHARM_PATH/src/init.sql.gz"
log-rotate: log-rotate:
#constraint: required | recommended | optional #constraint: required | recommended | optional
#auto: pair | summon | none ## default: pair #auto: pair | summon | none ## default: pair

BIN
cyclos/src/init.sql.gz

14
drone/metadata.yml

@ -47,3 +47,17 @@ uses:
proxy: "Public access" proxy: "Public access"
default-options: default-options:
target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80 target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80
backup:
constraint: recommended
auto: pair
solves:
backup: "Automatic regular backups of sql database"
default-options:
## First pattern matching wins, no pattern matching includes.
## include-patterns are checked first, then exclude-patterns
## Patterns rules:
## - ending / for directory
## - '*' authorized
## - must start with a '/', will start from $SERVICE_DATASTORE
# include-patterns:
# - /var/backups/pg/

33
etherpad/README.org

@ -0,0 +1,33 @@
# -*- ispell-local-dictionary: "english" -*-
#+TITLE: Etherpad Charm
* Upgrade
Based on https://github.com/ether/etherpad-lite , following:
https://github.com/ether/etherpad-lite/blob/develop/doc/docker.md
Used:
#+begin_src sh
TAG=1.8.14
git clone https://github.com/ether/etherpad-lite --depth 1 -b $TAG
docker build --build-arg INSTALL_SOFFICE=1 \
--build-arg ETHERPAD_PLUGINS=" \
ep_font_family ep_mammoth ep_comments_page ep_table_of_contents \
ep_markdown ep_image_upload ep_spellcheck ep_headings2 ep_align \
ep_who_did_what ep_what_have_i_missed ep_embedmedia \
ep_openid_connect ep_rss ep_git_commit_saved_revision" \
. -t docker.0k.io/etherpad:${TAG}-0k
docker push docker.0k.io/etherpad:${TAG}-0k
#+end_src
* Admin password
We choose to NOT include admin panel as it allows to change settings
and install plugins but this will not allow to reproduce an install
easily. We can do this on the =compose.yml= side in a reproducible
manner.

39
etherpad/hooks/init

@ -0,0 +1,39 @@
#!/bin/bash
## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.
## Accessible variables are:
## - SERVICE_NAME Name of current service
## - DOCKER_BASE_IMAGE Base image from which this service might be built if any
## - SERVICE_DATASTORE Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
. lib/common
set -e
dirs=(
"$SERVICE_DATASTORE/var/lib/etherpad"
)
uid_gid=($(docker_get_uid_gid "$SERVICE_NAME" "etherpad" "etherpad")) || {
err "Could not fetch uid/gid on image of service ${DARKYELLOW}$SERVICE_NAME${NORMAL}."
return 1
}
uid="${uid_gid[0]}"
gid="${uid_gid[1]}"
for dir in "${dirs[@]}"; do
mkdir -p "$dir"
find "$dir" \! -uid "$uid" -print0 | while read-0 f; do
chown -v "$uid" "$f" || return 1
done
find "$dir" \! -gid "$gid" -print0 | while read-0 f; do
chgrp -v "$gid" "$f" || return 1
done
done

26
etherpad/hooks/postgres_database-relation-joined

@ -0,0 +1,26 @@
#!/bin/bash
set -e
PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"
control=$(echo -en "$USER\0$DBNAME\0$PASSWORD\0$ADMIN_PASSWORD" | md5_compat)
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
DB_TYPE: postgres
DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\"
DB_NAME: \"$DBNAME\"
DB_PASS: \"$PASSWORD\"
DB_USER: \"$USER\"
"
[ "$control" == "$(relation-get control 2>/dev/null)" ] && exit 0
relation-set control "$control"
info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."

53
etherpad/metadata.yml

@ -0,0 +1,53 @@
name: etherpad
summary: "Etherpad-lite server"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
inherit: base-0k
## Custom built from git 1.8.14 https://github.com/ether/etherpad-lite with
## build arg --build-arg INSTALL_SOFFICE=1
docker-image: docker.0k.io/etherpad:1.8.14-soffice ## custom built from git m etherpad/etherpad
description: |
Etherpad-lite service.
data-resources:
- /var/lib/etherpad
docker-compose:
command: node src/node/server.js --apikey /var/lib/etherpad/APIKEY.txt
environment:
SOFFICE: '/usr/bin/soffice'
uses:
postgres-database:
#constraint: required | recommended | optional
#auto: pair | summon | none ## default: pair
constraint: required
auto: summon
solves:
database: "main storage"
default-options:
extensions:
- unaccent
web-proxy:
#constraint: required | recommended | optional
#auto: pair | summon | none ## default: pair
constraint: recommended
auto: pair
solves:
proxy: "Public access"
default-options:
target: !var-expand ${MASTER_BASE_SERVICE_NAME}:9001
backup:
constraint: recommended
auto: pair
solves:
backup: "Automatic regular backup"
default-options:
## First pattern matching wins, no pattern matching includes.
## include-patterns are checked first, then exclude-patterns
## Patterns rules:
## - ending / for directory
## - '*' authorized
## - must start with a '/', will start from $SERVICE_DATASTORE
#exclude-patterns:
# - "/var/lib/odoo/sessions/"

3
gitea/metadata.yml

@ -1,7 +1,8 @@
description: "Gitea Server" description: "Gitea Server"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>" maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
## XXXvlab: docker uses the 'build' directory or the 'image:' option here. ## XXXvlab: docker uses the 'build' directory or the 'image:' option here.
docker-image: docker.0k.io/gitea:1.0.0
## based on gitea/gitea:1.14.2
docker-image: docker.0k.io/gitea:1.14.2
docker-compose: docker-compose:
ports: ports:
- "5022:22" - "5022:22"

47
gogocarto/README.org

@ -15,35 +15,28 @@ far from being mature, we try here to get only the released part.
We are talking of: We are talking of:
[[https://gitlab.adullact.net/pixelhumain/GoGoCarto/-/blob/f3c10f16fc08b533ef44f1325fdb50f87fa73224/docs/installation_docker.md][gogocarto docker install documentation]] [[https://gitlab.adullact.net/pixelhumain/GoGoCarto/-/blob/f3c10f16fc08b533ef44f1325fdb50f87fa73224/docs/installation_docker.md][gogocarto docker install documentation]]
** Intermediate docker image is cached on docker.0k.io if needed
** Updating
To rebuild a new version, I pushed the huge docker image of
=gogocarto= container that is required in the process (that contains
apache and all build/install tools). Note that this should not require
to be rebuild if anything important changes in the directory =docker/=.
This process will ensure to avoid rebuilding the gigantic intermediate
image needed (with apache and build/install tools).
Using:
You need to run this from the root of a code checkout of:
https://gitlab.adullact.net/pixelhumain/GoGoCarto
#+begin_src sh #+begin_src sh
docker_tree_hash=$(git rev-parse HEAD:docker) docker_tree_hash=$(git rev-parse HEAD:docker)
docker tag docker_gogocarto docker.0k.io/gogocarto-builder:${docker_tree_hash}
docker push docker.0k.io/gogocarto-builder:${docker_tree_hash}
## Intermediate docker image is cached on docker.0k.io if needed
if ! docker pull docker.0k.io/gogocarto-builder:${docker_tree_hash}; then
make build &&
docker tag docker_gogocarto docker.0k.io/gogocarto-builder:${docker_tree_hash} &&
docker push docker.0k.io/gogocarto-builder:${docker_tree_hash}
else
docker tag docker.0k.io/gogocarto-builder:${docker_tree_hash} docker_gogocarto
fi
make up &&
docker-compose -f docker/docker-compose.yml exec gogocarto make init
#+end_src #+end_src
So, if needed, it can be pulled back to avoid the hassle of making it:
#+begin_src sh
docker_tree_hash=$(git rev-parse HEAD:docker)
docker pull docker.0k.io/gogocarto-builder:${docker_tree_hash} &&
docker tag docker.0k.io/gogocarto-builder:${docker_tree_hash} docker_gogocarto
#+end_src
You still need to do (as of <2020-10-20 Tue>):
- get the latest source code
- =make up= to launch de containers with the images
- =make shell= from the latest git repos
- =make init= in the container
** Full release is cached on =docker.0k.io/downloads= ** Full release is cached on =docker.0k.io/downloads=
This is the content of the source tree, once populated by =make init=. This is the content of the source tree, once populated by =make init=.
@ -55,7 +48,6 @@ As of <2020-10-23 Fri>, the =Makefile='s =init= target is:
#+begin_src sh #+begin_src sh
$ grep ^init Makefile -A1 $ grep ^init Makefile -A1
init: install assets load-fixtures fix-perms ## Initialize the project init: install assets load-fixtures fix-perms ## Initialize the project
#+end_src #+end_src
Note that =load-fixtures= target is actually the mongodb initialization: Note that =load-fixtures= target is actually the mongodb initialization:
@ -71,7 +63,7 @@ load-fixtures: ## Create the DB schema, generate DB classes and load fixtures
This will be done in the =hooks/mongo_database-relation-joined= accordingly. This will be done in the =hooks/mongo_database-relation-joined= accordingly.
*** create the full data release bundle this is what was made:
*** create the full data release bundle this is what was made
#+begin_src sh #+begin_src sh
## correct bundles links (no idea if it is needed) ## correct bundles links (no idea if it is needed)
@ -94,6 +86,7 @@ commit_sha=$(git describe HEAD --tags)
tar cjv \ tar cjv \
bin web vendor config src templates translations \ bin web vendor config src templates translations \
--exclude=.git --exclude=.gitignore \ --exclude=.git --exclude=.gitignore \
--owner=root --group=root \
> gogocarto-${commit_sha}.tar.bz2 > gogocarto-${commit_sha}.tar.bz2
#+end_src #+end_src
@ -103,9 +96,3 @@ all non '\*.{php,yml,js}' files. I noticed many many unrelated files in
We need =bin= for symphony utilities that allows to setup things. We need =bin= for symphony utilities that allows to setup things.
* Roadmap
** TODO mongo / the default database name is probably not correctly set.
Indeed, it seems it is defaulted to =gogocarto_default= and I didn't find
yet how to set it. There's a connection with SAAS mode I think.

4
gogocarto/hooks/init

@ -16,8 +16,6 @@
set -e set -e
if ! [ -e "$GOGOCARTO_CODE" ]; then
gogocarto:init || exit 1
fi
gogocarto:init || exit 1
gogocarto:config || exit 1 gogocarto:config || exit 1

2
gogocarto/hooks/mongo_database-relation-joined

@ -24,6 +24,8 @@ MONGODB_URL=$MONGO_URL
MONGODB_DATABASE=$DBNAME MONGODB_DATABASE=$DBNAME
###< doctrine/mongodb-odm-bundle ### ###< doctrine/mongodb-odm-bundle ###
DATABASE_NAME=$DBNAME
EOF EOF
if [ -e "$inited" ]; then if [ -e "$inited" ]; then

11
gogocarto/hooks/publish_dir-relation-joined

@ -49,6 +49,7 @@ cat <<EOF > "${GOGOCARTO_CODE}"/web/.htaccess
Options -MultiViews Options -MultiViews
RewriteEngine On RewriteEngine On
RewriteRule ^js/.* - [L]
RewriteCond %{REQUEST_FILENAME} !-f RewriteCond %{REQUEST_FILENAME} !-f
RewriteRule ^(.*)$ index.php/\$1 [QSA,L] RewriteRule ^(.*)$ index.php/\$1 [QSA,L]
@ -59,8 +60,10 @@ config-add "
services: services:
$MASTER_TARGET_SERVICE_NAME: $MASTER_TARGET_SERVICE_NAME:
volumes: volumes:
- $SERVICE_CONFIGSTORE/opt/apps/gogocarto:/opt/apps/gogocarto:rw
- $SERVICE_DATASTORE/var/cache/gogocarto:/opt/apps/gogocarto/var/cache:rw
- $SERVICE_DATASTORE/var/lib/gogocarto/sessions:/opt/apps/gogocarto/var/sessions:rw
- $SERVICE_DATASTORE/var/log/gogocarto:/opt/apps/gogocarto/var/log:rw
- $GOGOCARTO_CODE:$GOGOCARTO_DIR:rw
- $SERVICE_DATASTORE/var/cache/gogocarto:$GOGOCARTO_DIR/var/cache:rw
- $SERVICE_DATASTORE/var/lib/gogocarto/sessions:$GOGOCARTO_DIR/var/sessions:rw
- $SERVICE_DATASTORE/var/log/gogocarto:$GOGOCARTO_DIR/var/log:rw
## Required to give PHP access to this dir
- $upload_dir:$GOGOCARTO_DIR/web/uploads:rw
" "

44
gogocarto/hooks/schedule_commands-relation-joined

@ -0,0 +1,44 @@
#!/bin/bash
## When writing relation script, remember:
## - they should be idempotents
## - they can be launched while the dockers is already up
## - they are launched from the host
## - the target of the link is launched first, and get a chance to ``relation-set``
## - both side of the scripts get to use ``relation-get``.
. lib/common
set -e
## XXXvlab: should use container name here so that it could support
## multiple postgres
label=${SERVICE_NAME}
DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label
## XXXvlab: Should we do a 'docker exec' instead ?
bin_console="dc run -u www-data --rm --entrypoint \\\"$GOGOCARTO_DIR/bin/console\\\" $MASTER_BASE_SERVICE_NAME"
## Warning: 'docker -v' will use HOST directory even if launched from
## 'cron' container.
file_put "$DST" <<EOF
@daily root lock ${label}-checkvote -D -p 10 -c "\
$bin_console app:elements:checkvote" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkvote_script.log
@daily root lock ${label}-checkExternalSourceToUpdate -D -p 10 -c "\
$bin_console app:elements:checkExternalSourceToUpdate" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkExternalSourceToUpdate_script.log
@daily root lock ${label}-notify-moderation -D -p 10 -c "\
$bin_console app:notify-moderation" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-notify-moderation_script.log
@hourly root lock ${label}-sendNewsletter -D -p 10 -c "\
$bin_console app:users:sendNewsletter" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-sendNewsletter_script.log
*/5 * * * * root lock ${label}-webhooks-post -D -p 10 -c "\
$bin_console --env=prod app:webhooks:post" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-webhooks-post_script.log
EOF
chmod +x "$DST"

40
gogocarto/lib/common

@ -1,15 +1,45 @@
# -*- mode: shell-script -*- # -*- mode: shell-script -*-
GOGOCARTO_CODE="$SERVICE_CONFIGSTORE/opt/apps/gogocarto"
GOGOCARTO_RELEASE=3.1.3-2-gf3c10f1
GOGOCARTO_DIR="/opt/apps/gogocarto"
GOGOCARTO_CODE="$SERVICE_CONFIGSTORE$GOGOCARTO_DIR"
GOGOCARTO_RELEASE=3.1.3-56-g6b8ba361
GOGOCARTO_URL=https://docker.0k.io/downloads/gogocarto-"${GOGOCARTO_RELEASE}".tar.bz2 GOGOCARTO_URL=https://docker.0k.io/downloads/gogocarto-"${GOGOCARTO_RELEASE}".tar.bz2
gogocarto:init() { gogocarto:init() {
current_version=""
if [ -d "${GOGOCARTO_CODE}" ]; then
current_version=$(cat "${GOGOCARTO_CODE}"/.version) || {
err "Couldn't find ${GOGOCARTO_CODE}/.version file."
echo " Your config dir is in a broken state." >&2
return 1
}
else
mkdir -p "${GOGOCARTO_CODE}" && mkdir -p "${GOGOCARTO_CODE}" &&
cd "${GOGOCARTO_CODE}" && cd "${GOGOCARTO_CODE}" &&
curl "$GOGOCARTO_URL" | tar xjv
git init . &&
git config user.email "root@localhost" &&
git config user.name "Root" || {
err "Couldn't create directory ${GOGOCARTO_CODE}, or init it with git."
return 1
}
fi
if [ "$current_version" != "$GOGOCARTO_RELEASE" ]; then
cd "${GOGOCARTO_CODE}" || return 1
if [ -d "$PWD"/.git ]; then
rm -rf "$PWD"/* "$PWD"/{.version,.inited-*,.env} || return 1
else
err "Can't find the '.git' directory in ${GOGOCARTO_CODE}."
return 1
fi
curl -L "$GOGOCARTO_URL" | tar xjv || {
err "Couldn't download $GOGOCARTO_URL."
return 1
}
echo "$GOGOCARTO_RELEASE" > .version
git add -A . &&
git commit -m "Release $GOGOCARTO_RELEASE"
fi
} }
@ -21,7 +51,7 @@ gogocarto:config() {
cat <<EOF > "${GOGOCARTO_CODE}"/.env cat <<EOF > "${GOGOCARTO_CODE}"/.env
###> symfony/framework-bundle ### ###> symfony/framework-bundle ###
APP_ENV=$APP_ENV
APP_ENV=${APP_ENV}
APP_SECRET=82ec369b81caab5446ddfc3b5edb4d00 APP_SECRET=82ec369b81caab5446ddfc3b5edb4d00
CSRF_PROTECTION=$( CSRF_PROTECTION=$(
[ "$APP_ENV" == "prod" ] && [ "$APP_ENV" == "prod" ] &&

5
gogocarto/metadata.yml

@ -26,3 +26,8 @@ uses:
auto: summon auto: summon
solves: solves:
database: "main storage" database: "main storage"
schedule-commands:
constraint: recommended
auto: pair
solves:
maintenance: "Production scheduled tasks"

12
hedgedoc/hooks/init

@ -0,0 +1,12 @@
#!/bin/bash
init-config-add "\
$SERVICE_NAME:
environment:
CMD_USECDN: \"false\"
"
## ``codimd`` create uploads folder with wrong permission
uid=$(docker_get_uid "$SERVICE_NAME" "hedgedoc")
mkdir -p "$SERVICE_DATASTORE/hedgedoc/public/uploads"
chown "$uid" "$SERVICE_DATASTORE/hedgedoc/public/uploads"

0
codimd/hooks/postgres_database-relation-joined → hedgedoc/hooks/postgres_database-relation-joined

22
hedgedoc/hooks/web_proxy-relation-joined

@ -0,0 +1,22 @@
#!/bin/bash
set -e
DOMAIN=$(relation-get domain) || exit 1
## These are mainly to setup the correct web-hook
if [ "$MASTER_BASE_SERVICE_NAME" == "$DOMAIN" ]; then
## This is because the IP will be the docker container version
USESSL=""
else
USESSL="CMD_PROTOCOL_USESSL: 'true'"
fi
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
CMD_DOMAIN: $DOMAIN
$USESSL
"

4
codimd/metadata.yml → hedgedoc/metadata.yml

@ -1,6 +1,6 @@
docker-image: docker.0k.io/hackmd:2.2.0 ## from: nabo.codimd.dev/hackmdio/hackmd:2.2.0
docker-image: docker.0k.io/hedgedoc:1.7.2 ## from: quay.io/hedgedoc/hedgedoc:1.7.2-alpine
data-resources: data-resources:
- /home/hackmd/app/public/uploads
- /hedgedoc/public/uploads
default-options: default-options:

2
logrotate/build/src/entrypoint.sh

@ -10,4 +10,4 @@ do
done >> status.clean done >> status.clean
mv status.clean status mv status.clean status
/usr/sbin/logrotate -s /var/lib/logrotate/status /etc/logrotate.conf
/usr/sbin/logrotate -v -s /var/lib/logrotate/status /etc/logrotate.conf

19
mariadb/build/Dockerfile

@ -0,0 +1,19 @@
FROM alpine:3.9
RUN apk add --no-cache mariadb mariadb-client mariadb-server-utils && \
rm -f /var/cache/apk/*
## Required by mysql-backup
RUN apk add --no-cache bash gzip && \
rm -f /var/cache/apk/*
RUN mkdir -p /run/mysqld && \
chown -R mysql:mysql /run/mysqld
RUN sed -i "s|.*bind-address\s*=.*|bind-address=0.0.0.0|g" /etc/my.cnf.d/mariadb-server.cnf
COPY src/ /
EXPOSE 3306
ENTRYPOINT ["/entrypoint.sh"]

20
mariadb/build/src/entrypoint.sh

@ -0,0 +1,20 @@
#!/bin/sh
if ! [ -d /var/lib/mysql/mysql ]; then
chown -R mysql:mysql /var/lib/mysql
mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null
fi
## Support of Ctrl-C: see https://github.com/docker-library/mysql/issues/47
run() {
"$@" &
pid="$!"
trap "kill -SIGQUIT $pid" INT TERM
wait
}
run mysqld --user=mysql --skip-name-resolve --skip-networking=0 "$@"

48
mariadb/hooks/init

@ -25,31 +25,7 @@ fi
if ! [ -d "$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR" ]; then if ! [ -d "$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR" ]; then
MYSQL_ROOT_PASSWORD="$(gen_password)" MYSQL_ROOT_PASSWORD="$(gen_password)"
debug docker run -e "MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD" \
--rm \
-v "$DATA_DIR:$DB_DATADIR" \
--entrypoint /bin/bash "$DOCKER_BASE_IMAGE"
docker run -e "MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD" \
--rm \
-v "$DATA_DIR:$DB_DATADIR" \
--entrypoint /bin/bash "$DOCKER_BASE_IMAGE" -c '
mysqld() {
echo "diverted mysqld call..." >&2;
echo "$*" | grep -E "(--help|--skip-networking)" >/dev/null 2>&1 || return;
echo " .. Allowing call." >&2;
/usr/sbin/mysqld "$@";
}
export -f mysqld;
/docker-entrypoint.sh mysqld' || true
## docker errorlevel is still 0 even if it failed.
## AND we must ignore mysqld error !
[ "$(find "$DATA_DIR" \
-maxdepth 0 -type d -empty 2>/dev/null)" ] && {
err "Docker run probably failed to do it's job."
exit 1
}
mkdir -p "${HOST_DB_PASSFILE%/*}"
## XXXvlab: this won't help support multiple project running on the ## XXXvlab: this won't help support multiple project running on the
## same host ## same host
cat <<EOF > "$HOST_DB_PASSFILE" cat <<EOF > "$HOST_DB_PASSFILE"
@ -57,5 +33,27 @@ if ! [ -d "$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR" ]; then
password=$MYSQL_ROOT_PASSWORD password=$MYSQL_ROOT_PASSWORD
EOF EOF
chmod 600 "$HOST_DB_PASSFILE" chmod 600 "$HOST_DB_PASSFILE"
## deactivating final connection check
ddb () { true; }
export -f ddb
ensure_db_docker_running || exit 1
docker exec -i "$_DB_NAME" mysql <<EOF
USE mysql;
GRANT ALL ON *.* TO 'root'@'%' IDENTIFIED BY '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION;
GRANT ALL ON *.* TO 'root'@'localhost' IDENTIFIED BY '$MYSQL_ROOT_PASSWORD' WITH GRANT OPTION;
SET PASSWORD FOR 'root'@'localhost'=PASSWORD('${MYSQL_ROOT_PASSWORD}');
FLUSH PRIVILEGES;
EOF
. lib/common
err=$(echo "$check_command" | ddb 2>&1 >/dev/null) || {
err "Docker run probably failed to do it's job."
echo "$err" | prefix " " >&2
exit 1
}
info "New root password for mysql. " info "New root password for mysql. "
fi fi

102
mariadb/hooks/install.d/60-backup.sh

@ -0,0 +1,102 @@
set -eux ## important for unbound variable ?
## Require these to be set
# MYSQL_ROOT_PASSWORD=
# MYSQL_CONTAINER=
[ "${MYSQL_ROOT_PASSWORD}" ] || {
echo "Error: you must set \$MYSQL_ROOT_PASSWORD prior to running this script." >&2
exit 1
}
[ "${MYSQL_CONTAINER}" ] || {
echo "Error: you must set \$MYSQL_CONTAINER prior to running this script." >&2
exit 1
}
##
## Init, to setup passwordless connection to mysql
##
type -p mysql >/dev/null || {
case $(lsb_release -is) in
Debian)
case $(lsb_release -rs) in
10)
apt-get install -y default-mysql-client </dev/null
;;
*)
apt-get install -y mysql-client </dev/null
;;
esac
;;
Ubuntu)
apt-get install -y mysql-client </dev/null
;;
esac
}
if ! [ -e "/root/.my.cnf" ]; then
cat <<EOF > ~/.my.cnf
[client]
password=${MYSQL_ROOT_PASSWORD}
EOF
chmod 600 ~/.my.cnf
fi
##
## installation of the mysql-backup script
##
apt-get install -y kal-shlib-{core,pretty,common} </dev/null
ln -sf "${PWD}/resources/bin/mysql-backup" /usr/local/sbin/mysql-backup
##
## Connection to cron
##
depends cron
cat <<EOF > /etc/cron.d/mysql-backup
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
0 * * * * root /usr/local/sbin/mysql-backup --host \$(docker-ip "$MYSQL_CONTAINER" 2>/dev/null | sed -r 's/ +/ /g' | cut -f 3 -d " ") | logger -t mysql-backup
EOF
##
## Connection with backup
##
if type -p mirror-dir >/dev/null 2>&1; then
[ -d "/etc/mirror-dir" ] || {
echo "'mirror-dir' is installed but no '/etc/mirror-dir' was found." >&2
exit 1
}
depends shyaml
if ! sources=$(shyaml get-values default.sources < /etc/mirror-dir/config.yml); then
echo "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'." >&2
exit 1
fi
if ! echo "$sources" | grep "^/var/backups/mysql$" 2>/dev/null; then
sed -i '/sources:/a\ - /var/backups/mysql' /etc/mirror-dir/config.yml
cat <<EOF >> /etc/mirror-dir/config.yml
/var/backups/mysql:
exclude:
- "/*.inprogress"
EOF
fi
else
echo "warn: 'mirror-dir' not installed, backup won't be sent" >&2
fi

3
mariadb/hooks/schedule_command-relation-joined

@ -33,12 +33,11 @@ COMPOSE_LAUNCHER_OPTS=$COMPOSE_LAUNCHER_OPTS
$schedule root lock $label -D -p 10 -c "\ $schedule root lock $label -D -p 10 -c "\
docker run --rm \ docker run --rm \
-e MYSQLHOST="${SERVICE_NAME}" \
--network ${PROJECT_NAME}_default \ --network ${PROJECT_NAME}_default \
-v \"$LOCAL_DB_PASSFILE\":/root/.my.cnf \ -v \"$LOCAL_DB_PASSFILE\":/root/.my.cnf \
-v \"$HOST_CHARM_STORE/${CHARM_REL_PATH#${CHARM_STORE}/}/resources/bin/mysql-backup:/usr/sbin/mysql-backup\" \ -v \"$HOST_CHARM_STORE/${CHARM_REL_PATH#${CHARM_STORE}/}/resources/bin/mysql-backup:/usr/sbin/mysql-backup\" \
-v \"$SERVICE_DATASTORE/var/backups/mysql:/var/backups/mysql\" \ -v \"$SERVICE_DATASTORE/var/backups/mysql:/var/backups/mysql\" \
--entrypoint mysql-backup \ --entrypoint mysql-backup \
\"$DOCKER_BASE_IMAGE\"" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log
\"$DOCKER_BASE_IMAGE\" --host \"${SERVICE_NAME}\"" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log
EOF EOF
chmod +x "$DST" chmod +x "$DST"

4
mariadb/metadata.yml

@ -1,6 +1,4 @@
name: MariaDB name: MariaDB
## From: mysql Ver 15.1 Distrib 10.0.21-MariaDB
docker-image: docker.0k.io/mariadb:1.0.0
maintainer: "Valentin Lab <valentin.lab@kalysto.org>" maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
provides: provides:
mysql-database: mysql-database:
@ -18,7 +16,7 @@ uses:
schedule: "31 * * * *" ## schedule backup every hour schedule: "31 * * * *" ## schedule backup every hour
## This one is useful only if previous relation is used ## This one is useful only if previous relation is used
backup: backup:
constraint: optional
constraint: recommended
auto: pair auto: pair
solves: solves:
backup: "Automatic regular backups of dumps" backup: "Automatic regular backups of dumps"

40
mariadb/resources/bin/mysql-backup

@ -1,5 +1,34 @@
#!/bin/bash #!/bin/bash
usage="$exname [--host HOST] [DATABASE...]"
DBS=()
host=
while [ "$1" ]; do
case "$1" in
"--help"|"-h")
echo "$usage" >&2
exit 0
;;
"--host")
host="$2"
shift
;;
*)
DBS+=("$1")
;;
esac
shift
done
mysql_opts=()
if [ "$host" ]; then
mysql_opts+=(-h "$host")
fi
m() { m() {
mysql "${mysql_opts[@]}" -Bs "$@" mysql "${mysql_opts[@]}" -Bs "$@"
} }
@ -17,13 +46,10 @@ mysql_tables() {
echo "SHOW TABLES" | m "$db" echo "SHOW TABLES" | m "$db"
} }
mysql_opts=()
if [ "$MYSQLHOST" ]; then
mysql_opts+=(-h "$MYSQLHOST")
fi
DBS=($(mysql_databases)) || exit 1
if [ "${#DBS[@]}" == 0 ]; then
DBS=($(mysql_databases)) || exit 1
fi
mkdir -p /var/backups/mysql mkdir -p /var/backups/mysql
@ -38,7 +64,7 @@ for db in "${DBS[@]}"; do
[ -d "$dst" ] && mv "$dst" "$dst.old" [ -d "$dst" ] && mv "$dst" "$dst.old"
mkdir -p "$dst.inprogress" mkdir -p "$dst.inprogress"
(( start = SECONDS )) (( start = SECONDS ))
md "$db" --routines --no-data --add-drop-database --database "$db" | gzip --rsyncable > "$dst.inprogress/schema.sql.gz"
md "$db" --routines --no-data --add-drop-database --database "$db" | gzip --rsyncable > "$dst.inprogress/00-schema.sql.gz"
tables=$(mysql_tables "$db") tables=$(mysql_tables "$db")
for table in $tables; do for table in $tables; do
backup_file="$dst.inprogress/${table}.sql.gz" backup_file="$dst.inprogress/${table}.sql.gz"

27
monujo/hooks/init

@ -0,0 +1,27 @@
#!/bin/bash
## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.
## Accessible variables are:
## - SERVICE_NAME Name of current service
## - DOCKER_BASE_IMAGE Base image from which this service might be built if any
## - SERVICE_DATASTORE Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service
set -e
APP_NAME=monujo
SOURCE_URL="https://docker.0k.io/downloads/$APP_NAME-0.0.1.tar.bz2"
LOCATION="$SERVICE_DATASTORE/opt/apps/$APP_NAME"
mkdir -p "$LOCATION"
if dir_is_empty "$LOCATION"; then
cd "$LOCATION"
wget -q "$SOURCE_URL" -O file.tar.bz2
tar xjf file.tar.bz2
rm file.tar.bz2
chown root:root "$LOCATION" -R
fi

17
monujo/metadata.yml

@ -0,0 +1,17 @@
description: "LokWallet"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
subordinate: true
uses:
publish-dir:
#constraint: required | recommended | optional
#auto: pair | summon | none ## default: pair
scope: container
constraint: required
auto: summon
solves:
container: "main running server"
default-options:
location: !var-expand "$DATASTORE/$BASE_SERVICE_NAME/opt/apps/monujo"
# data-dirs: ## write permission for web-app
# - .

4
mysql/hooks/install

@ -9,11 +9,11 @@ GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""}
GIT_0K_BASE=${GIT_0K_BASE:-"git.0k.io:/var/git"} GIT_0K_BASE=${GIT_0K_BASE:-"git.0k.io:/var/git"}
apt-get install -y --force-yes cron kal-scripts
apt-get install -y cron kal-scripts
debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_PASSWORD" debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_PASSWORD"
debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_PASSWORD" debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_PASSWORD"
apt-get install -y --force-yes mysql-server
apt-get install -y mysql-server

13
nextcloud/build/Dockerfile

@ -1,13 +0,0 @@
## This is a cache of nextcloud:18.0.1 image (gmp is included)
FROM docker.0k.io/nextcloud:1.2.0
##
## What is following is only to patch nextcloud to remove
## some database name checks
##
COPY database-accept-dots.patch /tmp/
RUN cd /usr/src/nextcloud && \
patch -p1 < /tmp/database-accept-dots.patch

14
nextcloud/build/database-accept-dots.patch

@ -1,14 +0,0 @@
diff --git a/lib/private/Setup/AbstractDatabase.php b/lib/private/Setup/AbstractDatabase.php
index 0cbfecf..a821a2e 100644
--- a/lib/private/Setup/AbstractDatabase.php
+++ b/lib/private/Setup/AbstractDatabase.php
@@ -72,9 +72,6 @@ abstract class AbstractDatabase {
} elseif (empty($config['dbname'])) {
$errors[] = $this->trans->t("%s enter the database name.", [$this->dbprettyname]);
}
- if(substr_count($config['dbname'], '.') >= 1) {
- $errors[] = $this->trans->t("%s you may not use dots in the database name", array($this->dbprettyname));
- }
return $errors;
}

2
nextcloud/metadata.yml

@ -1,9 +1,9 @@
docker-image: docker.0k.io/nextcloud:18.0.1-myc
data-resources: data-resources:
- /var/www/html - /var/www/html
- /var/lib/nextcloud/data - /var/lib/nextcloud/data
config-resources: config-resources:
- /var/www/html/config - /var/www/html/config
provides: provides:
nextcloud-app: nextcloud-app:
uses: uses:

50
odoo-tecnativa/actions/install

@ -0,0 +1,50 @@
#!/bin/bash
## Load action gets a first argument a DIRECTORY holding the necessary files.
##
##
if [ -z "$SERVICE_DATASTORE" ]; then
echo "This script is meant to be run through 'compose' to work properly." >&2
exit 1
fi
usage="$exname [-h|--help] DBNAME [MODULE ...]"
dbname=
modules=()
while [ "$1" ]; do
case "$1" in
"--help"|"-h")
print_usage
exit 0
;;
*)
[ -z "$dbname" ] && { dbname=$1 ; shift ; continue ; }
modules+=("$1")
;;
esac
shift
done
if [ -z "$dbname" ]; then
err "You must provide a destination database name as second argument."
print_usage
exit 1
fi
if [ -z "${modules[*]}" ]; then
err "You must provide at least one module as third argument."
print_usage
exit 1
fi
modules="$(echo "${modules[@]}" | tr " " ",")"
## This can work only if ~/.my.cnf is correctly created by init.
set -e
launch_docker_compose run "$CONTAINER_NAME" --init="$modules" -d "$dbname" --stop-after-init
info "Installed '$modules' module(s) into database '$dbname'."

18
onlyoffice/hooks/init

@ -13,9 +13,23 @@
set -e set -e
if ! [ -e "$SERVICE_CONFIGSTORE/etc/onlyoffice/documentserver/local.json" ]; then
image_id=$(service_base_image_id "$SERVICE_NAME") || {
err "couldn't get image id of $SERVICE_NAME."
exit 1
}
CONTROL_FILE="$SERVICE_CONFIGSTORE/etc/onlyoffice/.image_id"
if [ "$(cat "$CONTROL_FILE")" != "$image_id" ]; then
## first time we need to extract configuration from image ## first time we need to extract configuration from image
rm -rf "$SERVICE_CONFIGSTORE/etc/onlyoffice"
mkdir -p "$SERVICE_CONFIGSTORE/etc/onlyoffice" mkdir -p "$SERVICE_CONFIGSTORE/etc/onlyoffice"
service_base_image_export_dir "$SERVICE_NAME" /etc/onlyoffice/documentserver "$SERVICE_CONFIGSTORE/etc/onlyoffice/" service_base_image_export_dir "$SERVICE_NAME" /etc/onlyoffice/documentserver "$SERVICE_CONFIGSTORE/etc/onlyoffice/"
printf "%s" "$image_id" > "$CONTROL_FILE"
else
## probably not needed to regenerate fonts
init-config-add "\
$MASTER_BASE_SERVICE_NAME:
environment:
GENERATE_FONTS: \"false\"
"
fi fi

3
onlyoffice/hooks/nextcloud_app-relation-joined

@ -15,7 +15,8 @@ compose --no-relations --no-init \
app:install onlyoffice \; \ app:install onlyoffice \; \
config:system:set onlyoffice DocumentServerInternalUrl --value="http://$MASTER_BASE_SERVICE_NAME/" \; \ config:system:set onlyoffice DocumentServerInternalUrl --value="http://$MASTER_BASE_SERVICE_NAME/" \; \
config:system:set onlyoffice StorageUrl --value="http://$MASTER_TARGET_SERVICE_NAME/" \; \ config:system:set onlyoffice StorageUrl --value="http://$MASTER_TARGET_SERVICE_NAME/" \; \
config:app:set onlyoffice jwt_secret --value="$KEY"
config:app:set onlyoffice jwt_secret --value="$KEY" \; \
config:system:set allow_local_remote_servers --type=boolean --value=true
ONLYOFFICE_CFG="$SERVICE_CONFIGSTORE/etc/onlyoffice/documentserver/local.json" ONLYOFFICE_CFG="$SERVICE_CONFIGSTORE/etc/onlyoffice/documentserver/local.json"

34
onlyoffice/hooks/postgres_database-relation-joined

@ -0,0 +1,34 @@
#!/bin/bash
. lib/common
set -e
PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"
ADMIN_PASSWORD=$(relation-base-compose-get admin-password 2>/dev/null) || {
if [ -e "$CONFIG" ]; then
ADMIN_PASSWORD=$(grep ^admin_passwd "$CONFIG" | sed -r 's/^admin_passwd\s+=\s+(.+)$/\1/g')
fi
if [ -z "$ADMIN_PASSWORD" ]; then
info "Generating odoo admin password"
ADMIN_PASSWORD=$(gen_password)
fi
}
database=$(options-get database 2>/dev/null) || true
database="${database:-$DBNAME}"
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
DB_TYPE: \"postgres\"
DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\"
DB_NAME: \"$DBNAME\"
DB_PWD: \"$PASSWORD\"
DB_USER: \"$USER\"
"
info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."

36
onlyoffice/metadata.yml

@ -1,7 +1,14 @@
docker-image: docker.0k.io/oods:1.0.0
# from: https://github.com/0k/Docker-DocumentServer (6.1.0)
docker-image: docker.0k.io/oods:2.0.0
data-resources: data-resources:
- /var/www/onlyoffice/Data - /var/www/onlyoffice/Data
- /var/log/onlyoffice - /var/log/onlyoffice
## not documented but found in entrypoint and docker inspect
- /var/lib/onlyoffice
- /var/lib/postgres
- /var/lib/rabbitmq
- /var/lib/redis
- /usr/share/fonts/truetype/custom
config-resources: config-resources:
- /etc/onlyoffice/documentserver - /etc/onlyoffice/documentserver
@ -25,3 +32,30 @@ uses:
proxy: "Public access" proxy: "Public access"
default-options: default-options:
target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80 target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80
postgres-database:
constraint: required
auto: summon
solves:
database: "main storage"
## XXXvlab: this should not be necessary as official documentation
## seems to explain that all data are either logs or caches. But we
## had issues with onlyoffice not sending back modifications to the
## filesystem. With some tweaks, we can sometimes gets the data from
## application cache. So for now, we must include this data to
## backup.
backup:
constraint: recommended
auto: pair
solves:
backup: "Automatic regular backups of dumps"
default-options:
## First pattern matching wins, no pattern matching includes.
## include-patterns are checked first, then exclude-patterns
## Patterns rules:
## - ending / for directory
## - '*' authorized
## - must start with a '/', will start from $SERVICE_DATASTORE
include-patterns:
- /var/lib/onlyoffice/

12
peertube/build/Dockerfile

@ -1,4 +1,4 @@
FROM alpine:3.9 AS common
FROM alpine:3.14 AS common
RUN apk add gnupg ffmpeg RUN apk add gnupg ffmpeg
@ -13,11 +13,10 @@ FROM common AS builder
## Download target release ## Download target release
## ##
ENV PEERTUBE_RELEASE=v2.1.1
ENV PEERTUBE_RELEASE=v3.3.0
RUN apk add wget RUN apk add wget
COPY ./*.patch /tmp
RUN mkdir -p /opt/apps/peertube && \ RUN mkdir -p /opt/apps/peertube && \
cd /opt/apps/peertube && \ cd /opt/apps/peertube && \
wget https://github.com/Chocobozzz/PeerTube/releases/download/${PEERTUBE_RELEASE}/peertube-${PEERTUBE_RELEASE}.tar.xz && \ wget https://github.com/Chocobozzz/PeerTube/releases/download/${PEERTUBE_RELEASE}/peertube-${PEERTUBE_RELEASE}.tar.xz && \
@ -25,12 +24,12 @@ RUN mkdir -p /opt/apps/peertube && \
rm peertube-${PEERTUBE_RELEASE}.tar.xz && \ rm peertube-${PEERTUBE_RELEASE}.tar.xz && \
mv peertube-${PEERTUBE_RELEASE}/* . && \ mv peertube-${PEERTUBE_RELEASE}/* . && \
rmdir peertube-${PEERTUBE_RELEASE} && \ rmdir peertube-${PEERTUBE_RELEASE} && \
cat /tmp/*.patch | patch -p1 && \
mkdir -p /etc/peertube /var/lib/peertube && \ mkdir -p /etc/peertube /var/lib/peertube && \
ln -sf /var/lib/peertube /opt/apps/peertube/storage ln -sf /var/lib/peertube /opt/apps/peertube/storage
RUN apk add yarn ## Build command RUN apk add yarn ## Build command
RUN apk add git build-base python bash ## Build deps
RUN apk add git build-base python3 bash && \
ln -sf /usr/bin/python3 /usr/local/bin/python ## Build deps
RUN chown -R peertube:peertube /opt/apps/peertube RUN chown -R peertube:peertube /opt/apps/peertube
@ -39,7 +38,6 @@ RUN apk add npm ## only needed to install things that should be in yarn
USER peertube USER peertube
RUN cd /opt/apps/peertube && \ RUN cd /opt/apps/peertube && \
npm install bcrypt && \
yarn install --production --pure-lockfile && \ yarn install --production --pure-lockfile && \
yarn cache clean yarn cache clean
@ -92,7 +90,7 @@ VOLUME /etc/peertube
EXPOSE 9000 EXPOSE 9000
RUN apk add nodejs-npm
RUN apk add nodejs npm
## runtime deps ## runtime deps
RUN apk add openssl RUN apk add openssl

26
peertube/build/dbname.patch

@ -1,26 +0,0 @@
diff --git a/dist/server/initializers/checker-before-init.js b/dist/server/initializers/checker-before-init.js
index d8422ee..5eb3678 100644
--- a/dist/server/initializers/checker-before-init.js
+++ b/dist/server/initializers/checker-before-init.js
@@ -16,7 +16,7 @@ function checkMissedConfig() {
const required = ['listen.port', 'listen.hostname',
'webserver.https', 'webserver.hostname', 'webserver.port',
'trust_proxy',
- 'database.hostname', 'database.port', 'database.suffix', 'database.username', 'database.password', 'database.pool.max',
+ 'database.hostname', 'database.port', 'database.dbname', 'database.username', 'database.password', 'database.pool.max',
'smtp.hostname', 'smtp.port', 'smtp.username', 'smtp.password', 'smtp.tls', 'smtp.from_address',
'email.body.signature', 'email.subject.prefix',
'storage.avatars', 'storage.videos', 'storage.logs', 'storage.previews', 'storage.thumbnails', 'storage.torrents', 'storage.cache',
diff --git a/dist/server/initializers/config.js b/dist/server/initializers/config.js
index 6aa916f..89d16fe 100644
--- a/dist/server/initializers/config.js
+++ b/dist/server/initializers/config.js
@@ -12,7 +12,7 @@ const CONFIG = {
HOSTNAME: config.get('listen.hostname')
},
DATABASE: {
- DBNAME: 'peertube' + config.get('database.suffix'),
+ DBNAME: config.get('database.dbname'),
HOSTNAME: config.get('database.hostname'),
PORT: config.get('database.port'),
USERNAME: config.get('database.username'),

1
peertube/hooks/init

@ -66,6 +66,7 @@ for section in "${VALID_SECTION[@]}"; do
done >> "$HOST_CONFIG_DIR/local.yaml" done >> "$HOST_CONFIG_DIR/local.yaml"
if ! [ -e "$HOST_DATA_DIR/config.json" ]; then if ! [ -e "$HOST_DATA_DIR/config.json" ]; then
mkdir -p "$HOST_DATA_DIR"
echo "{}" > "$HOST_DATA_DIR/config.json" echo "{}" > "$HOST_DATA_DIR/config.json"
fi fi

2
peertube/hooks/postgres_database-relation-joined

@ -15,7 +15,7 @@ cat <<EOF >> "$HOST_CONFIG_DIR/local.yaml"
database: database:
hostname: '$TARGET_SERVICE_NAME' hostname: '$TARGET_SERVICE_NAME'
## We had to patch peertube to have a direct dbname (doh!) ## We had to patch peertube to have a direct dbname (doh!)
dbname: '$DBNAME'
name: '$DBNAME'
port: 5432 port: 5432
username: '$USER' username: '$USER'
password: '$PASSWORD' password: '$PASSWORD'

2
postgres/metadata.yml

@ -15,7 +15,7 @@ uses:
schedule: "31 * * * *" ## schedule backup every hour schedule: "31 * * * *" ## schedule backup every hour
## This one is useful only if previous relation is used ## This one is useful only if previous relation is used
backup: backup:
constraint: optional
constraint: recommended
auto: pair auto: pair
solves: solves:
backup: "Automatic regular backups of dumps" backup: "Automatic regular backups of dumps"

4
precise/0k-odoo-light/hooks/install

@ -15,7 +15,7 @@ DEPS_TO_REMOVE="git"
KEEP_ONLY_PO=${KEEP_ONLY_PO:-fr en de} KEEP_ONLY_PO=${KEEP_ONLY_PO:-fr en de}
apt-get install -y --force-yes --no-install-recommends \
apt-get install -y --no-install-recommends \
$DEPS $DEPS_TO_REMOVE $DEPS $DEPS_TO_REMOVE
## XXXvlab: should use base-0k code instead ! ## XXXvlab: should use base-0k code instead !
@ -71,7 +71,7 @@ fi
RELEASE=jessie VIRTUALENV= hooks/install RELEASE=jessie VIRTUALENV= hooks/install
) )
apt-get remove -y --force-yes $DEPS_TO_REMOVE
apt-get remove -y $DEPS_TO_REMOVE
apt-get autoremove -y apt-get autoremove -y
rm -rf /opt/apps/git-sub /usr/lib/git-core/git-sub rm -rf /opt/apps/git-sub /usr/lib/git-core/git-sub

2
precise/apt-cacher/hooks/install

@ -3,7 +3,7 @@
set -eux set -eux
apt-get install -y --force-yes apt-cacher-ng
apt-get install -y apt-cacher-ng
## This is needed to enable https_port ## This is needed to enable https_port

42
precise/base-0k/hooks/install.d/00-base.sh

@ -2,21 +2,55 @@
set +eux set +eux
## Certificate DST_Root_CA-X3 expired, it needs to be removed
## from list of available certificates. Debian <10 have the issue.
##
## Fixing: https://www.reddit.com/r/sysadmin/comments/pzags0/lets_encrypts_dst_root_ca_x3_expired_yesterday/
## see also: https://techcrunch.com/2021/09/21/lets-encrypt-root-expiry/?guccounter=1
modified_certificate=
mkdir -p /usr/local/share/ca-certificates/custom
for certfile_name in isrgrootx1:ISRG_Root_X1 isrg-root-x2 lets-encrypt-r3; do
certfile=${certfile_name%%:*}
name=${certfile_name#*:}
echo "Checking $certfile for $name"
if ! [ -e "/usr/local/share/ca-certificates/custom/$certfile".crt ] &&
! [ -e "/etc/ssl/certs/$name.pem" ]; then
wget --no-check-certificate https://letsencrypt.org/certs/"$certfile".pem \
-O "/usr/local/share/ca-certificates/custom/$certfile".crt
modified_certificate=1
fi
done
if grep "^mozilla/DST_Root_CA_X3.crt" /etc/ca-certificates.conf 2>/dev/null 2>&1; then
sed -ri 's%^(mozilla/DST_Root_CA_X3.crt)%!\1%g' /etc/ca-certificates.conf
fi
if [ -n "$modified_certificate" ]; then
update-ca-certificates
fi
## We can now do the ``apt-get update`` safely...
apt-get update apt-get update
apt-get -y --force-yes install bash-completion wget bzip2 git-core \
apt-get -y install bash-completion wget bzip2 git-core \
less tmux mosh \ less tmux mosh \
sudo git vim file </dev/null sudo git vim file </dev/null
apt-get -y --force-yes python-software-properties </dev/null ||
apt-get -y --force-yes software-properties-common </dev/null
apt-get -y python-software-properties </dev/null ||
apt-get -y software-properties-common </dev/null
case $(lsb_release -is) in case $(lsb_release -is) in
Ubuntu) Ubuntu)
apt-get install -y --force-yes language-pack-en </dev/null
apt-get install -y language-pack-en </dev/null
;; ;;
Debian) Debian)
sed -ri 's/^\s*#\s*(en_US\.UTF-?8.*)\s*$/\1/g' /etc/locale.gen sed -ri 's/^\s*#\s*(en_US\.UTF-?8.*)\s*$/\1/g' /etc/locale.gen
locale-gen locale-gen
;; ;;
esac esac

2
precise/base-0k/hooks/install.d/05-shyaml.sh

@ -2,6 +2,6 @@
## For shyaml ## For shyaml
apt-get install -y --force-yes python-pip libyaml-dev python-dev </dev/null
apt-get install -y python-pip libyaml-dev python-dev </dev/null
pip install shyaml --upgrade pip install shyaml --upgrade

6
precise/base-0k/hooks/install.d/20-kal-scripts.sh

@ -10,7 +10,7 @@ set -eux
if ! [ -e /etc/apt/sources.list.d/kalysto.org.list ]; then if ! [ -e /etc/apt/sources.list.d/kalysto.org.list ]; then
## Required to fetch our repository in https ## Required to fetch our repository in https
apt-get install -y --force-yes apt-transport-https </dev/null
apt-get install -y apt-transport-https </dev/null
cat <<EOF > /etc/apt/sources.list.d/kalysto.org.list cat <<EOF > /etc/apt/sources.list.d/kalysto.org.list
## vlab's shell libraries ## vlab's shell libraries
@ -18,7 +18,7 @@ deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main
EOF EOF
if ! type gpg >/dev/null; then if ! type gpg >/dev/null; then
apt-get install -y --force-yes gnupg2 </dev/null
apt-get install -y gnupg2 </dev/null
fi fi
## Include the GPG key ## Include the GPG key
wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add - wget -O - https://deb.kalysto.org/conf/public-key.gpg | apt-key add -
@ -28,4 +28,4 @@ EOF
-o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"
fi fi
apt-get install -y --force-yes kal-scripts </dev/null
apt-get install -y kal-scripts </dev/null

80
precise/base-0k/hooks/install.d/30-customize.sh

@ -24,14 +24,57 @@ else
cp /root/.bashrc /root/.bashrc.pre-install cp /root/.bashrc /root/.bashrc.pre-install
fi fi
if ! type -p fzf; then
## Required to get fzf
case $(lsb_release -is) in
Debian)
case $(lsb_release -rs) in
9)
backports_list="/etc/apt/sources.list.d/backports.list"
if ! [ -e "$backports_list" ]; then
echo "deb http://ftp.debian.org/debian stretch-backports main" > \
"$backports_list"
## Update only this repo:
apt-get update -o Dir::Etc::sourcelist="sources.list.d/backports.list" \
-o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"
fi
;;
esac
;;
esac
apt-get install fzf </dev/null
fi
if ! type -p fd-find; then
if apt-get install fd-find </dev/null; then
ln -sf "$(which fdfind)" /usr/local/bin/fd
else
wget https://github.com/sharkdp/fd/releases/download/v8.2.1/fd_8.2.1_amd64.deb -O /tmp/fd.deb
dpkg -i /tmp/fd.deb
fi
fi
if ! [ -e "/usr/share/doc/fzf/examples/key-bindings.bash.orig" ]; then
## Use C-f instead of C-t for file insertion
sed -r -i.orig 's/C-t/C-f/g' /usr/share/doc/fzf/examples/key-bindings.bash
fi
cat <<EOF >> /root/.bashrc cat <<EOF >> /root/.bashrc
## History management ## History management
export HISTCONTROL=ignoredups
export HISTSIZE=50000
export HISTCONTROL=ignoreboth
export HISTSIZE=500000
export HISTIGNORE="&:[bf]g:exit:ls:history"
export HISTFILESIZE=
export HISTTIMEFORMAT="%Y-%m-%d %T "
shopt -s histappend shopt -s histappend
PROMPT_COMMAND='history -a'
## Prompt easy management ## Prompt easy management
@ -54,5 +97,36 @@ function glog() {
prompt 1 prompt 1
PROMPT_COMMAND='history -a' ## after prompt setting as it resets it
##
## fzf (apt-get install fzf) and fd (apt-get install fd-find)
##
if [ -e /usr/share/doc/fzf/examples/key-bindings.bash ]; then
. /usr/share/doc/fzf/examples/key-bindings.bash
fi
if [ -e /usr/share/doc/fzf/examples/completion.bash ]; then
. /usr/share/doc/fzf/examples/completion.bash
fi
#export FZF_DEFAULT_OPTS="--color 'fg:#bbccdd,fg+:#ddeeff,bg:#111820,preview-bg:#223344,border:#778899'"
export FZF_DEFAULT_COMMAND='fd --type f --hidden --follow --exclude .git'
export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND"
EOF EOF
cat <<EOF >> /root/.bash_profile
## XXXvlab:
## http://stackoverflow.com/questions/9652126/bashrc-profile-is-not-loaded-on-new-tmux-session-or-window-why
## Including ``.bashrc`` if it exists (tmux don't load bashrc, and bashrc
## don't load profile... so not recursive call)
if [ -f ~/.bashrc ]; then
. ~/.bashrc
fi
EOF

2
precise/ca/hooks/install

@ -2,7 +2,7 @@
set -eux # -x for verbose logging to juju debug-log set -eux # -x for verbose logging to juju debug-log
apt-get install -y --force-yes kal-manage ## this is for ``mkcrt``
apt-get install -y kal-manage ## this is for ``mkcrt``
CA_SUBJECT=${CA_SUBJECT:-/C=FR/ST=France/O=Kalysto/CN=kal.fr/emailAddress=ca@kal.fr} CA_SUBJECT=${CA_SUBJECT:-/C=FR/ST=France/O=Kalysto/CN=kal.fr/emailAddress=ca@kal.fr}

2
precise/git/hooks/install

@ -9,7 +9,7 @@ GIT_0K_BASE=${GIT_0K_BASE:-"0k-ro:/var/git"}
GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""} GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""}
apt-get install -y --force-yes kal-shlib-common
apt-get install -y kal-shlib-common
apt-get install -y bzr apt-get install -y bzr

4
precise/host/hooks/install.d/38-ntp.sh

@ -0,0 +1,4 @@
#!/bin/bash
apt-get install ntp -y </dev/null

3
precise/host/hooks/install.d/39-logrotate.sh

@ -0,0 +1,3 @@
#!/bin/bash
apt-get install logrotate -y </dev/null

5
precise/host/hooks/install.d/40-btrfs.sh

@ -51,9 +51,10 @@ if [ "$UPDATE_BTRFS_TOOLS" ]; then
cd btrfs-progs && cd btrfs-progs &&
apt-get install -y asciidoc xmlto --no-install-recommends </dev/null && apt-get install -y asciidoc xmlto --no-install-recommends </dev/null &&
apt-get install -y build-essential autoconf pkg-config uuid-dev libattr1-dev \ apt-get install -y build-essential autoconf pkg-config uuid-dev libattr1-dev \
zlib1g-dev libacl1-dev e2fslibs-dev libblkid-dev liblzo2-dev </dev/null &&
zlib1g-dev python3-dev python3-setuptools libacl1-dev e2fslibs-dev \
libblkid-dev liblzo2-dev libzstd-dev </dev/null &&
./autogen.sh && ./autogen.sh &&
./configure --prefix=/opt/apps/btrfs-tools &&
./configure --prefix=/opt/apps/btrfs-tools --disable-zoned &&
make && make &&
make install && make install &&
ln -sf /opt/apps/btrfs-tools/bin/* /usr/local/bin/ ln -sf /opt/apps/btrfs-tools/bin/* /usr/local/bin/

45
precise/host/hooks/install.d/50-lxc.sh

@ -1,8 +1,51 @@
#!/bin/bash #!/bin/bash
apt-get install lxc -y --force-yes </dev/null
apt-get install lxc -y </dev/null
## required to access the created lxc ! ## required to access the created lxc !
if ! [ -e ~/.ssh/id_rsa ]; then if ! [ -e ~/.ssh/id_rsa ]; then
ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q
fi fi
## From: https://wiki.debian.org/LXC#Independent_bridge_setup
lxc_net="$(cat /etc/lxc/default.conf |
grep ^lxc.net.0.type |
cut -f 2 -d = |
xargs echo)"
if [ "$lxc_net" == "empty" ]; then
## Boggers, we are on default unconfigured networks for lxc
sed -ri 's/^lxc.net.0.type = empty/lxc.net.0.type = veth\nlxc.net.0.link = lxcbr0\nlxc.net.0.flags = up\nlxc.net.0.hwaddr = 00:16:3e:xx:xx:xx/g' /etc/lxc/default.conf
[ -e "/etc/default/lxc-net" ] || {
cat <<EOF > /etc/default/lxc-net
USE_LXC_BRIDGE="true"
# If you change the LXC_BRIDGE to something other than lxcbr0, then
# you will also need to update your /etc/lxc/default.conf as well as the
# configuration (/var/lib/lxc/<container>/config) for any containers
# already created using the default config to reflect the new bridge
# name.
# If you have the dnsmasq daemon installed, you'll also have to update
# /etc/dnsmasq.d/lxc and restart the system wide dnsmasq daemon.
LXC_BRIDGE="lxcbr0"
LXC_ADDR="172.101.0.1"
LXC_NETMASK="255.255.255.0"
LXC_NETWORK="172.101.0.0/24"
LXC_DHCP_RANGE="172.101.0.2,172.101.0.254"
LXC_DHCP_MAX="253"
# Uncomment the next line if you'd like to use a conf-file for the lxcbr0
# dnsmasq. For instance, you can use 'dhcp-host=mail1,172.46.0.100' to have
# container 'mail1' always get ip address 172.46.0.100.
LXC_DHCP_CONFILE=/etc/lxc/dnsmasq.conf
# Uncomment the next line if you want lxcbr0's dnsmasq to resolve the .lxc
# domain. You can then add "server=/lxc/172.46.0.1' (or your actual )
# to /etc/dnsmasq.conf, after which 'container1.lxc' will resolve on your
# host.
#LXC_DOMAIN="lxc"
EOF
}
service lxc-net restart
fi

32
precise/host/hooks/install.d/60-docker.sh

@ -1,26 +1,32 @@
#!/bin/bash #!/bin/bash
need_restart=
just_installed=
if ! type -p docker; then if ! type -p docker; then
echo "Installing docker..." echo "Installing docker..."
curl -sSL https://get.docker.io | sh
fi
type -p curl >dev/null ||
apt-get install -y curl </dev/null
docker_version=17.06
if ! [[ "$(docker --version)" == "Docker version $docker_version"* ]]; then
version="$(apt-cache madison docker-ce | cut -f 2 -d \| | grep "$docker_version" | head -n 1 | xargs echo)"
## DOWNGRADE to 17.xx because 18.xx do not support registry v1
apt-get install -y --force-yes docker-ce="$version"
need_restart=true
curl -sSL https://get.docker.io | sh || exit 1
just_installed=1
fi fi
if ! egrep 'disable-legacy-registry' /lib/systemd/system/docker.service 2>/dev/null; then
sed -ri 's/^(ExecStart=.*)$/\1 --disable-legacy-registry=false/g' /lib/systemd/system/docker.service
if [ -n "$just_installed" ]; then
need_restart=
docker_version=17
if ! [[ "$(docker --version)" == "Docker version $docker_version"* ]]; then
version="$(apt-cache madison docker-ce |
cut -f 2 -d \| |
grep "$docker_version" |
head -n 1 | xargs echo)"
## DOWNGRADE to 17.xx because 18.xx do not support registry v1
apt-get install -y --allow-downgrades docker-ce="$version"
need_restart=true need_restart=true
fi
fi
if [ "$need_restart" ]; then
if [ -n "$need_restart" ] && [ -z "$NO_DOCKER_RESTART" ]; then
systemctl daemon-reload && systemctl daemon-reload &&
service docker restart service docker restart
fi
fi fi

1
precise/host/hooks/install.d/61-mirror-dir.sh

@ -0,0 +1 @@
../../../../rsync-backup/hooks/install.d/60-install.sh

23
precise/host/hooks/install.d/70-0k.sh

@ -8,6 +8,7 @@ GIT_0K_BASE=${GIT_0K_BASE:-"0k-ro:/var/git"}
## 0k git remote options ## 0k git remote options
GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""} GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""}
NO_DOCKER_RESTART=${NO_DOCKER_RESTART:-}
## ##
## Install 0k-manage ## Install 0k-manage
@ -25,7 +26,11 @@ mkdir -p /opt/apps
git checkout 0k/prod/master git checkout 0k/prod/master
fi fi
pip install sact.epoch || exit 1
## Debian 9 did not have setuptool
if [ "$(python -c 'import setuptools' 2>&1 | tail -n 1)" == "ImportError: No module named setuptools" ]; then
pip install setuptools
fi
pip install sact.epoch
if [ "$(python -c 'import sact.epoch' 2>&1 | tail -n 1)" == "ImportError: No module named interface" ]; then if [ "$(python -c 'import sact.epoch' 2>&1 | tail -n 1)" == "ImportError: No module named interface" ]; then
echo "Error: conflicting installation of zope.interface detected. Trying workaround." echo "Error: conflicting installation of zope.interface detected. Trying workaround."
( (
@ -41,8 +46,8 @@ mkdir -p /opt/apps
exit 1 exit 1
fi fi
fi fi
ln -sf /opt/apps/0k-manage/src/bin/pick2del_backups /usr/local/bin/
# ln -sf /opt/apps/0k-manage/src/bin/* /usr/local/bin/ # ln -sf /opt/apps/0k-manage/src/bin/* /usr/local/bin/
) )
@ -51,7 +56,7 @@ mkdir -p /opt/apps
## ##
if [ -f /etc/compose/local.conf ]; then if [ -f /etc/compose/local.conf ]; then
sed -ri 's%^(. /opt/venv/docker-compose/bin/activate)$%# \1 ## docker-compsoe not needed anymore%g' \
sed -ri 's%^(. /opt/venv/docker-compose/bin/activate)$%# \1 ## docker-compose not needed anymore%g' \
/etc/compose/local.conf /etc/compose/local.conf
fi fi
@ -61,7 +66,7 @@ fi
## ##
( (
apt-get install -y kal-shlib-charm kal-shlib-cache kal-shlib-cmdline </dev/null
apt-get install -y kal-shlib-{common,charm,cache,cmdline,config} </dev/null
if [ -d "/opt/apps/0k-charm" ]; then if [ -d "/opt/apps/0k-charm" ]; then
cd /opt/apps/0k-charm && cd /opt/apps/0k-charm &&
git checkout master && git checkout master &&
@ -92,7 +97,7 @@ fi
if [ -d "/srv/charm-store" ]; then if [ -d "/srv/charm-store" ]; then
if [ -L "/srv/charm-store" ]; then if [ -L "/srv/charm-store" ]; then
info "Already have a valid /srv/charm-store"
echo "Already have a valid /srv/charm-store"
elif [ -L "/srv/charm-store/0k-charms" ]; then elif [ -L "/srv/charm-store/0k-charms" ]; then
mv /srv/charm-store{,.old} && mv /srv/charm-store{,.old} &&
mv /srv/charm-store.old/0k-charms /srv/charm-store && mv /srv/charm-store.old/0k-charms /srv/charm-store &&
@ -144,6 +149,7 @@ fi
## ##
( (
apt-get install -y kal-shlib-docker jq </dev/null
if [ -d "/opt/apps/0k-docker" ]; then if [ -d "/opt/apps/0k-docker" ]; then
cd /opt/apps/0k-docker && cd /opt/apps/0k-docker &&
git checkout master && git checkout master &&
@ -194,7 +200,7 @@ if ! [ -L "$ca_ln" ] || [ "$(realpath "$ca_ln")" != "$ca" ] ; then
need_restart=1 need_restart=1
fi fi
if [ "$need_restart" ]; then
if [ -n "$need_restart" ] && [ -z "$NO_DOCKER_RESTART" ]; then
service docker restart service docker restart
fi fi
@ -224,7 +230,6 @@ rm -rf /var/cache/compose
cat <<EOF > /etc/default/datastore cat <<EOF > /etc/default/datastore
DATASTORE=/srv/datastore DATASTORE=/srv/datastore
SNAPSHOT_BACKUP=/var/backups/snapshot
EOF EOF
cat <<EOF > /etc/default/compose cat <<EOF > /etc/default/compose
@ -243,7 +248,7 @@ export CONFIGSTORE=\$DOCKER_DATASTORE/config
EOF EOF
if ! egrep "^DEFAULT_COMPOSE_FILE=/etc/compose/compose.yml$" /etc/compose/local.conf >/dev/null 2>&1; then
if ! egrep "^DEFAULT_COMPOSE_FILE=" /etc/compose/local.conf >/dev/null 2>&1; then
mkdir /etc/compose -p mkdir /etc/compose -p
touch /etc/compose/local.conf touch /etc/compose/local.conf
echo "DEFAULT_COMPOSE_FILE=/etc/compose/compose.yml" >> /etc/compose/local.conf echo "DEFAULT_COMPOSE_FILE=/etc/compose/compose.yml" >> /etc/compose/local.conf
@ -268,6 +273,6 @@ fi
ln -sfnv /opt/apps/0k-pgm/bin/* /usr/local/bin/ ln -sfnv /opt/apps/0k-pgm/bin/* /usr/local/bin/
find -L /usr/local/bin -maxdepth 1 -type l -ilname /opt/apps/0k-pgm/bin/\* -delete find -L /usr/local/bin -maxdepth 1 -type l -ilname /opt/apps/0k-pgm/bin/\* -delete
apt-get install -y --force-yes pv buffer < /dev/null
apt-get install -y pv buffer < /dev/null
apt-get install -y postgresql-client </dev/null apt-get install -y postgresql-client </dev/null
) )

23
precise/host/hooks/install.d/75-fail2ban.sh

@ -0,0 +1,23 @@
#!/bin/bash
## Depends lxc-scripts installed
##
## Install
##
apt-get install -y fail2ban </dev/null
sed -ri 's/^(bantime\s+=\s+.*)$/bantime = 1w/g' /etc/fail2ban/jail.conf
sed -ri 's/^(findtime\s+=\s+.*)$/findtime = 26w/g' /etc/fail2ban/jail.conf
##
## Test
##
# fail2ban-client status
# fail2ban-client status sshd

19
precise/host/hooks/install.d/80-dns-waterfall.sh

@ -8,7 +8,7 @@
HOST_EXTERNAL_DEVICE=${HOST_EXTERNAL_DEVICE:-eth0} HOST_EXTERNAL_DEVICE=${HOST_EXTERNAL_DEVICE:-eth0}
apt-get install -y bind9 dnsmasq
apt-get install -y bind9 dnsmasq bind9-host </dev/null
echo HOST_EXTERNAL_DEVICE="$HOST_EXTERNAL_DEVICE" >> /etc/default/lxc echo HOST_EXTERNAL_DEVICE="$HOST_EXTERNAL_DEVICE" >> /etc/default/lxc
sed -ri "s%10\.0\.3\.%$LXC_NETWORK.%g;s%^#LXC_DHCP_CONFILE=%LXC_DHCP_CONFILE=%g" /etc/default/lxc-net sed -ri "s%10\.0\.3\.%$LXC_NETWORK.%g;s%^#LXC_DHCP_CONFILE=%LXC_DHCP_CONFILE=%g" /etc/default/lxc-net
@ -23,6 +23,7 @@ HOST_IP=$(. /etc/default/lxc && ifip "$HOST_EXTERNAL_DEVICE")
echo " echo "
server=$LXC_ADDR server=$LXC_ADDR
interface=lo interface=lo
bind-interfaces
no-negcache no-negcache
log-queries log-queries
log-facility=/var/log/dnsmasq.log log-facility=/var/log/dnsmasq.log
@ -30,9 +31,11 @@ log-facility=/var/log/dnsmasq.log
echo " echo "
server=${HOST_IP} server=${HOST_IP}
bind-interfaces
log-queries log-queries
no-negcache no-negcache
log-facility=/var/log/lxc-dnsmasq.log log-facility=/var/log/lxc-dnsmasq.log
no-resolv
" >> /etc/lxc/dnsmasq.conf " >> /etc/lxc/dnsmasq.conf
( (
@ -44,16 +47,18 @@ log-facility=/var/log/lxc-dnsmasq.log
mkdir /var/log/named -p && mkdir /var/log/named -p &&
chown bind:bind /var/log/named chown bind:bind /var/log/named
/etc/init.d/bind9 restart
/etc/init.d/dnsmasq restart
/etc/init.d/bind9 stop
/etc/init.d/dnsmasq stop
service lxc restart service lxc restart
service lxc-net restart ## had to 'brctl delbr lxcbr0' myself service lxc-net restart ## had to 'brctl delbr lxcbr0' myself
/etc/init.d/dnsmasq start
/etc/init.d/bind9 start
cp /etc/resolv.conf{,.orig} cp /etc/resolv.conf{,.orig}
cat <<EOF > /etc/resolv.conf cat <<EOF > /etc/resolv.conf
nameserver 127.0.0.1 nameserver 127.0.0.1
#domain . ## didn't work on 12.04
search localdomain ## imperfect, we don't want to search www.localdomain
EOF EOF
## ##
@ -69,7 +74,7 @@ cat <<EOF > /etc/logrotate.d/dnsmasq
compress compress
postrotate postrotate
kill -s SIGUSR2 "\$(cat /var/run/dnsmasq/dnsmasq.pid)"
/bin/kill -s SIGUSR2 "\$(cat /var/run/dnsmasq/dnsmasq.pid)"
endscript endscript
} }
@ -85,7 +90,7 @@ cat <<EOF > /etc/logrotate.d/lxc-dnsmasq
compress compress
postrotate postrotate
kill -s SIGUSR2 "\$(cat /var/run/lxc/dnsmasq.pid)"
/bin/kill -s SIGUSR2 "\$(cat /var/run/lxc/dnsmasq.pid)"
endscript endscript
} }

245
precise/host/hooks/install.d/90-shorewall.sh

@ -8,6 +8,8 @@
## Install ## Install
## ##
HOST_EXTERNAL_DEVICE=${HOST_EXTERNAL_DEVICE:-eth0}
version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; } version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; }
shorewall_candidate_version=$(echo $(apt-cache policy shorewall | grep "Candidate:" | cut -f 2 -d :)) shorewall_candidate_version=$(echo $(apt-cache policy shorewall | grep "Candidate:" | cut -f 2 -d :))
@ -29,23 +31,179 @@ else
} }
fi fi
case $(lsb_release -is) in
Debian)
case $(lsb_release -rs) in
10)
## we had trouble with ``nft`` shorewall
update-alternatives --set iptables /usr/sbin/iptables-legacy
;;
esac
;;
esac
apt-get install -y dnsutils </dev/null
## ##
## Configuration ## Configuration
## ##
cat <<EOF > /etc/shorewall/README
Important notes gathered through time:
# Shorewall duties on our host
- block any access from outside to local ports if not mentionned
explicitely in shorewall.
- connect external ports to LXC (dockers has its own means)
- This uses ``/var/lib/lxc/*/shorewall`` files
- let mosh connect correctly
- ensure a correct access from Host/LXC/Docker to server's services.
For instance, an Host/LXC/Docker should be able to as if it was
external: ``curl https://myhostwebsite``. This is called routeback
and requires some special rules.
# Shorewall restarting and cache
Some process in shorewall seems to be using cache in some ways in
recent version that implies that it won't take actions if files are
not changed. A simple 'touch FILE' seems to be enough. Notice the
'Compiling' lines appearing in ``shorewall restart``.
It's always good to double-check in ``iptables -nL`` that some rules
actually seem to match your intention.
Don't forget that ``iptables-save`` is probably the best way to get
the full rules printed on stdout.
# Debian, ovh kernels and iptables-nft
Starting from Debian10, iptables by default uses iptables-nft... which
works well with default debian kernel. OVH kernels DO NOT provide
necessary kernel and we must:
update-alternatives --set iptables /usr/sbin/iptables-legacy
Note that transition is a little tricky because BOTH ways can have
their tables simultaneously. Use ``iptables-nft -nL`` and
``iptables-legacy -nL`` to check.
For now, we had little success to properly have the ``nft`` version
working properly on debian kernel. So even on debian kernel, we switch
here to iptables-legacy if on debian system.
# Interaction with docker's iptables rules
This is configured in ``shorewall.conf``, thanks to a simple::
DOCKER=Yes
# Route back
Be sure to check in /var/lib/lxc/*/shorewall definitions, they
must include special stances (see in next section).
On the side of shorewall, all network interface should be declared in
``/etc/shorewall/interfaces``.
# lxc ``shorewall`` files
Prefer the usage of ``ports`` files. If you insist on having a better
control of rules per LXC, you can use ``shorewall`` files.
They should be located in /var/lib/lxc/*/shorewall. This is a standard
redirection from external host port 10022 to lxc's port 22, on port
tcp::
DNAT net lan:%%IP%%:22 tcp 10022
#DNAT net lan:%%IP%%:22 udp 10022
Routeback (access of the same service from Host/LXC/Docker on the external
address) is given by these additional rules::
DNAT lan lan:www:80 tcp 80 - %%HOST_INTERNET_IP%%
DNAT lan lan:www:443 tcp 443 - %%HOST_INTERNET_IP%%
DNAT fw lan:www:80 tcp 80 - %%HOST_INTERNET_IP%%
DNAT fw lan:www:443 tcp 443 - %%HOST_INTERNET_IP%%
# lxc ``ports`` files
They should be located in /var/lib/lxc/*/ports. This is a standard
redirection from external host port 10022 to lxc's port 22, on both
tcp and udp::
10022:22 ## Normal port
# 10023:23 ## This is commented !
Note that comments are supported also.
EOF
cat <<EOF > /etc/shorewall/zones cat <<EOF > /etc/shorewall/zones
fw firewall fw firewall
net ipv4 net ipv4
lan ipv4 lan ipv4
EOF EOF
cat <<EOF > /etc/shorewall/macro.Mosh
#######################################################################################################
# DO NOT REMOVE THE FOLLOWING LINE
##############################################################################################################################################################
#ACTION SOURCE DEST PROTO DPORT SPORT ORIGDEST RATE USER MARK CONNLIMITTIME HEADERS SWITCH HELPER
#
PARAM - - udp 60000:61000
EOF
cat <<EOF > /etc/shorewall/interfaces cat <<EOF > /etc/shorewall/interfaces
#ZONE INTERFACE BROADCAST OPTIONS #ZONE INTERFACE BROADCAST OPTIONS
net eth0
net $HOST_EXTERNAL_DEVICE
## Uncomment to enable vpn setup ## Uncomment to enable vpn setup
#vpn tun0 detect #vpn tun0 detect
## All interfaces that require route back should be listed
## here:
lan lxcbr0 - routeback lan lxcbr0 - routeback
BEGIN SHELL
ifconfig=\$(ifconfig)
echo "BEGIN DOCKER adding networks rules:" >&2
for docker_net in \$(docker network list -f driver=bridge -q); do
gws=\$(docker network inspect "\$docker_net" --format "{{range .IPAM.Config}}{{.Gateway}}{{\"\n\"}}{{end}}") || continue
for gw in \$gws; do
if=\$(printf "%s" "\$ifconfig" | egrep "\$gw" -B 1 | head -n 1 | cut -f 1 -d " ")
echo " lan \$if - routeback" >&2
echo "lan \$if - routeback"
done
done
echo "END DOCKER" >&2
true
END SHELL
EOF EOF
cat <<EOF > /etc/shorewall/policy cat <<EOF > /etc/shorewall/policy
@ -61,16 +219,31 @@ cat <<EOF > /etc/shorewall/rules
SSH/ACCEPT net fw SSH/ACCEPT net fw
Ping/ACCEPT net fw Ping/ACCEPT net fw
Mosh(ACCEPT) net fw
BEGIN SHELL BEGIN SHELL
host_ip="\$(/sbin/ifconfig eth0 2> /dev/null | sed "s/^.*inet ad\+r://g" | grep ^[0-9] | sed "s/ .*$//g")"
host_ip="\$(/sbin/ifconfig $HOST_EXTERNAL_DEVICE 2> /dev/null | sed "s/^.*inet //g" | grep ^[0-9] | sed "s/ .*$//g")"
for name in \$(lxc-ls-running); do for name in \$(lxc-ls-running); do
ip=\$(dig +short A "\$name") ip=\$(dig +short A "\$name")
[ -e "/var/lib/lxc/\$name/shorewall" ] && [ -e "/var/lib/lxc/\$name/shorewall" ] &&
cat /var/lib/lxc/\$name/shorewall | sed -r "s/%%HOST_INTERNET_IP%%/\$host_ip/g" \
| sed -r "s/%%IP%%/\$ip/g"
cat /var/lib/lxc/\$name/shorewall |
sed -r "s/%%HOST_INTERNET_IP%%/\$host_ip/g" |
sed -r "s/%%IP%%/\$ip/g"
if [ -e "/var/lib/lxc/\$name/ports" ]; then
for ports in \$(cat /var/lib/lxc/\$name/ports | sed -r 's/#.*\$//g'); do
lxc_port=\${ports#*:}
ext_port=\${ports%:*}
echo "LXC \$name: redirection from \$host_ip:\$ext_port -> \$ip:\$lxc_port" >&2
for proto in tcp udp; do
for zone in net lan fw; do
echo "DNAT \$zone lan:\$ip:\$lxc_port \$proto \$ext_port - \$host_ip"
done
done
done
fi
done done
@ -81,7 +254,7 @@ END SHELL
EOF EOF
cat <<EOF > /etc/shorewall/masq cat <<EOF > /etc/shorewall/masq
eth0 lxcbr0
$HOST_EXTERNAL_DEVICE lxcbr0
EOF EOF
cat <<EOF > /etc/shorewall/start cat <<EOF > /etc/shorewall/start
@ -90,12 +263,19 @@ cat <<EOF > /etc/shorewall/start
. /etc/default/lxc . /etc/default/lxc
if [ -d "/sys/class/net/\$LXC_BRIDGE" -a "\$(cat /sys/class/net/\$LXC_BRIDGE/operstate)" == "up" ]; then
if [ -d "/sys/class/net/\$LXC_BRIDGE" ] && [ "\$(cat /sys/class/net/\$LXC_BRIDGE/operstate)" = "up" ]; then
source_file=
if [ -e /etc/init/lxc-net.conf ]; then
source_file=/etc/init/lxc-net.conf source_file=/etc/init/lxc-net.conf
code=\$(egrep '^\s+iptables.*\s+-j\s+' /etc/init/lxc-net.conf | grep -v '\-D' | sed -r 's/^\s+[^-]+/run_iptables /g')
elif [ -e /usr/lib/x86_64-linux-gnu/lxc/lxc-net ]; then
source_file=/usr/lib/x86_64-linux-gnu/lxc/lxc-net
fi
if [ "\$source_file" ]; then
code=\$(egrep '^\s+iptables.*\s+-j\s+' \$source_file | grep -v '\-D' | sed -r 's/^\s+[^-]+/run_iptables /g')
echo "Adding LXC rules:" echo "Adding LXC rules:"
echo "\$code" echo "\$code"
eval "\$code" eval "\$code"
fi
fi fi
EOF EOF
@ -112,9 +292,26 @@ EOF
apt-get install -y moreutils ## needed because ``ts`` is used in this script apt-get install -y moreutils ## needed because ``ts`` is used in this script
ln -sf /opt/apps/lxc-scripts/etc/cron.d/lxc-shorewall-repair /etc/cron.d/lxc-shorewall-repair ln -sf /opt/apps/lxc-scripts/etc/cron.d/lxc-shorewall-repair /etc/cron.d/lxc-shorewall-repair
cat <<EOF > /etc/logrotate.d/lxc-shorewall-repair
/var/log/lxc-shorewall-repair.log {
weekly
missingok
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate 52
compress
delaycompress
notifempty
create 640 root root
sharedscripts
}
EOF
## ##
## Logs
## LOGS
## ##
mkdir -p /var/log/shorewall mkdir -p /var/log/shorewall
@ -124,25 +321,32 @@ chmod g+w /var/log/shorewall
cat <<EOF > /etc/rsyslog.d/shorewall.conf cat <<EOF > /etc/rsyslog.d/shorewall.conf
:msg, contains, "Shorewall:" /var/log/shorewall/main.log :msg, contains, "Shorewall:" /var/log/shorewall/main.log
& ~ & ~
EOF
cat <<EOF > /etc/logrotate.d/shorewall
/var/log/shorewall/init.log {
weekly
rotate 4
compress
missingok
create 0640 root adm
if \$msg contains 'net-fw DROP IN=' then {
action(type="omfile" file="/var/log/shorewall/net-fw.log")
stop
} }
EOF
cat <<EOF > /etc/logrotate.d/shorewall
/var/log/shorewall/init.log
/var/log/shorewall/net-fw.log
/var/log/shorewall/main.log /var/log/shorewall/main.log
{ {
rotate 7
weekly weekly
missingok missingok
notifempty
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate 52
compress compress
delaycompress delaycompress
notifempty
create 640 root root
sharedscripts
postrotate postrotate
reload rsyslog >/dev/null 2>&1 || true reload rsyslog >/dev/null 2>&1 || true
endscript endscript
@ -157,9 +361,12 @@ service rsyslog restart
## ##
##
## Final settings
## ##
## Activate support for docker ## Activate support for docker
sed -ri 's/^DOCKER=No$/DOCKER=Yes/g' /etc/shorewall/shorewall.conf sed -ri 's/^DOCKER=No$/DOCKER=Yes/g' /etc/shorewall/shorewall.conf
sed -ri 's/^IP_FORWARDING=Keep$/IP_FORWARDING=On/g' /etc/shorewall/shorewall.conf

37
precise/host/hooks/install.d/95-checks.sh

@ -2,4 +2,39 @@
## REQUIRES: 0k-manage mail ## REQUIRES: 0k-manage mail
ln -sf /opt/apps/0k-manage/src/etc/cron.hourly/check-* /etc/cron.hourly/
[ -n "${BACKUP_SERVER}" ] || {
echo "Error: you must set \$BACKUP_SERVER prior to running this script." >&2
exit 1
}
if ! [ -e "/etc/default/alerting" ]; then
if [ -z "$DEFAULT_ALERTING_EMAILS" ]; then
echo "You must define \$DEFAULT_ALERTING_EMAILS before launching this script." >&2
exit 1
fi
cat <<EOF > /etc/default/alerting
MAIL_DESTS=(
$(
for email in $DEFAULT_ALERTING_EMAILS; do
echo " $email"
done
)
)
EOF
fi
ln -sfv /opt/apps/0k-manage/src/etc/cron.hourly/check-* /etc/cron.hourly/
if ! [ -e /usr/local/sbin/mirror-dir ]; then
ln -sfv /opt/apps/0k-charms/rsync-backup/resources/bin/mirror-dir /usr/local/sbin/
fi
mailname=$(cat /etc/mailname)
mailname=${mailname%.localdomain}
cat <<EOF > /etc/cron.d/mirror-dir-check
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
35 * * * * root mirror-dir check -d "$BACKUP_SERVER:10023" -n '12 hours' | logger -t mirror-dir-check
EOF

74
precise/host/hooks/install.d/96-backup-lxc.sh

@ -6,5 +6,75 @@
## Backup lxc ## Backup lxc
## ##
ln -sf /opt/apps/lxc-scripts/etc/cron.hourly/* /etc/cron.hourly/
ln -sf /opt/apps/lxc-scripts/etc/cron.daily/* /etc/cron.daily/
ln -sfv /opt/apps/lxc-scripts/etc/cron.hourly/* /etc/cron.hourly/
ln -sfv /opt/apps/lxc-scripts/etc/cron.daily/* /etc/cron.daily/
if ! grep ^BACKUP_LXC_PATH= /etc/default/lxc >/dev/null 2>&1; then
echo "BACKUP_LXC_PATH=/var/backups/lxc" >> /etc/default/lxc
fi
if ! grep ^BACKUP_SNAPSHOT_PATH= /etc/default/datastore >/dev/null 2>&1; then
echo "BACKUP_SNAPSHOT_PATH=/var/backups/snapshot" >> /etc/default/datastore
fi
##
## Mirror dir's logrotate and rsyslog's entry
##
mkdir -p /etc/mirror-dir
cat <<EOF > /etc/mirror-dir/config.yml
default:
sources:
- /etc
- /opt
- /root
- /var/log
- /var/lib/lxc
- /home
- /boot
- /srv
- /var/backups/lxc/latest
/var/lib/lxc:
exclude:
- /*/rootfs
EOF
cat <<EOF > /etc/rsyslog.d/mirror-dir.conf
if \$programname == 'mirror-dir' then {
action(type="omfile" file="/var/log/mirror-dir.log")
stop
}
EOF
service rsyslog restart
cat <<EOF > /etc/logrotate.d/mirror-dir.log
/var/log/mirror-dir.log
{
weekly
missingok
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate 52
compress
delaycompress
notifempty
create 640 root root
sharedscripts
postrotate
reload rsyslog >/dev/null 2>&1 || true
endscript
}
EOF

2
precise/mirror/hooks/install

@ -7,7 +7,7 @@ set -eux # -x for verbose logging to juju debug-log
## kal-manage provides the script /usr/lib/kal/dusk/sbin/ssh-cmd-validate ## kal-manage provides the script /usr/lib/kal/dusk/sbin/ssh-cmd-validate
## used to validate any entrant connection to SSH. ## used to validate any entrant connection to SSH.
apt-get install -y --force-yes rsync kal-manage
apt-get install -y rsync kal-manage
mkdir -p /var/mirror mkdir -p /var/mirror

2
precise/pypi-cacher/hooks/install

@ -2,7 +2,7 @@
set -eux set -eux
apt-get install -y --force-yes python-pip
apt-get install -y python-pip
pip install devpi-server pip install devpi-server

2
precise/svn/hooks/install

@ -3,7 +3,7 @@
set -eux # -x for verbose logging to juju debug-log set -eux # -x for verbose logging to juju debug-log
apt-get install -y --force-yes kal-shlib-pretty kal-scripts subversion
apt-get install -y kal-shlib-pretty kal-scripts subversion
mkdir -p /var/svn mkdir -p /var/svn

8
precise/vpn/hooks/install

@ -2,7 +2,7 @@
set -eux set -eux
apt-get install -y --force-yes wget git kal-scripts python
apt-get install -y wget git kal-scripts python
if test -z "${RELEASE:-}"; then if test -z "${RELEASE:-}"; then
if type -p lsb_release; then if type -p lsb_release; then
@ -19,7 +19,7 @@ fi
# ## Update only this repo: # ## Update only this repo:
# apt-get update -o Dir::Etc::sourcelist="sources.list.d/swupdate.openvpn.net.list" \ # apt-get update -o Dir::Etc::sourcelist="sources.list.d/swupdate.openvpn.net.list" \
# -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" # -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0"
# apt-get -y --force-yes install openvpn
# apt-get -y install openvpn
export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true
dpkg -i deb/openvpn_*.deb || true dpkg -i deb/openvpn_*.deb || true
@ -45,7 +45,7 @@ mkdir -p /var/run/openvpn /var/log/openvpn
mkdir -p /opt/apps mkdir -p /opt/apps
( (
apt-get install -y --force-yes python-setuptools python-twisted python-crypto python-yaml python-pyptlib
apt-get install -y python-setuptools python-twisted python-crypto python-yaml python-pyptlib
cd /opt/apps && cd /opt/apps &&
git clone https://git.torproject.org/pluggable-transports/obfsproxy.git && git clone https://git.torproject.org/pluggable-transports/obfsproxy.git &&
cd obfsproxy && cd obfsproxy &&
@ -54,7 +54,7 @@ mkdir -p /opt/apps
## obfs4proxy does not work with OpenVPN for now. ## obfs4proxy does not work with OpenVPN for now.
# ( # (
# apt-get install --force-yes -y golang &&
# apt-get install -y golang &&
# cd /opt/apps && # cd /opt/apps &&
# mkdir obfs4 && # mkdir obfs4 &&
# cd obfs4 && # cd obfs4 &&

60
rocketchat/README.org

@ -0,0 +1,60 @@
# -*- ispell-local-dictionary: "english" -*-
#+SETUPFILE: ~/.emacs.d/etc/setup/latex.setup
#+SETUPFILE: ~/.emacs.d/etc/setup/html-readtheorg-local.setup
#+TITLE: Rocket.Chat
* Updating the charm to a new version
We are using official image. Latest tags usually.
** Test new version
Rocket.chat has a powerfull and working database update mecanism that
will take care of migrating database on startup.
*** Get latest available versions
You can double-check available candidate for official images like this:
#+begin_src sh
docker-tags-fetch rocketchat/rocket.chat -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV
#+end_src
Check/Choose the version you want to test.
*** Modify your own =compose.yml=
By adding these 2 lines in your rocket chat service:
#+begin_src yaml
docker-compose:
image: rocketchat/rocket.chat:X.Y.Z
#+end_src
Replace X.Y.Z by the target version you want to test.
Launch =compose up=.
Be ready to wait a few minutes after =compose up= finished before the
service to be available: rocketchat is expected to take some time to
migrate.
** Change the current charm to include new version
To prepare the commit for next version, you can run the following
on the repository you'll use to push the new commit.
#+begin_src sh
BASENAME=rocketchat/rocket.chat
VERSION=$(docker-tags-fetch "$BASENAME" -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV | head -n 1)
echo Last version of rocket chat: $VERSION
docker pull rocketchat/rocket.chat:"$VERSION" &&
docker tag rocketchat/rocket.chat:"$VERSION" docker.0k.io/rocketchat:"$VERSION" &&
docker push docker.0k.io/rocketchat:"$VERSION" &&
sed -ri "s%^(docker-image: docker.0k.io/rocketchat:).*%\1$VERSION%" metadata.yml &&
sed -ri "s%^(#docker-image: rocketchat/rocket.chat:).*%\1$VERSION%" metadata.yml
#+end_src
You can review the changes and commit them.

4
rocketchat/metadata.yml

@ -1,7 +1,7 @@
summary: "Rocket Chat server" summary: "Rocket Chat server"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>" maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
#docker-image: rocket.chat:3.6.3
docker-image: docker.0k.io/rocketchat:3.6.3
#docker-image: rocketchat/rocket.chat:3.18.1
docker-image: docker.0k.io/rocketchat:3.18.1
data-resources: data-resources:
- /app/uploads - /app/uploads
uses: uses:

125
rsync-backup-target/README.org

@ -0,0 +1,125 @@
#+PROPERTY: Effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d
#+PROPERTY: Max_effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d
#+PROPERTY: header-args:python :var filename=(buffer-file-name)
#+PROPERTY: header-args:sh :var filename=(buffer-file-name)
#+TODO: TODO WIP BLOCKED | DONE CANCELED
#+LATEX_HEADER: \usepackage[margin=0.5in]{geometry}
#+LaTeX_HEADER: \hypersetup{linktoc = all, colorlinks = true, urlcolor = DodgerBlue4, citecolor = PaleGreen1, linkcolor = blue}
#+LaTeX_CLASS: article
#+OPTIONS: H:8 ^:nil prop:("Effort" "Max_effort") tags:not-in-toc
#+COLUMNS: %50ITEM %Effort(Min Effort) %Max_effort(Max Effort)
#+TITLE: rsync-backup-target
#+LATEX: \pagebreak
Usage of this service
#+LATEX: \pagebreak
#+LATEX: \pagebreak
* Configuration example
#+begin_src yaml
rsync-backup-target:
# docker-compose:
# ports:
# - "10023:22"
options:
admin: ## These keys are for the allowed rsync-backup to write stuff with rsync
myadmin:
- "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDESdz8bWtVcDQJ68IE/KpuZM9tAq\
ZDXGbvEVnTg16/yWqBGQg0QZdDjISsPn7D3Zr64g2qgD9n7EZghfGP9TkitvfrBYx8p\
7JkkUyt8nxklwOlKZFD5b3PF2bHloSsmjnP8ZMp5Ar7E+tn1guGrCrTcFIebpVGR3qF\
hRN9AlWNR+ekWo88ZlLJIrqD26jbWRJZm4nPCgqwhJwfHE3aVwfWGOqjSp4ij+jr2ac\
Arg7eD4clBPYIqKlqbfNRD5MFAH9sbB6jkebQCAUwNRwV7pKwCEt79HnCMoMjnZh6Ww\
6TlHIFw936C2ZiTBuofMx7yoAeqpifyzz/T5wsFLYWwSnX rsync@zen"
#+end_src
* ssh API
** Adding new keys for backup
This can be done through the admin accounts configured in =compose.yml=.
You can use then =ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key=:
#+begin_example
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key add "ssh-rsa AAA...Jdhwhv rsync@sourcelabel"
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
..Jdhwhv sourcelabel
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key rm sourcelabel
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
$
#+end_example
** Requesting a recover only key
*** as an admin
As an admin, by requesting a recover-only key on an ident that you
own, you are allowed to read (and only read) the content of the given
ident. This will allow you to give the credentials to any new host to
have a direct read access so-as to deploy the backup on a new host.
#+begin_example
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key request-recovery-key myident > /tmp/private_key
$ chmod 500 /tmp/private_key
$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \
-azvArH --delete --delete-excluded \
--partial --partial-dir .rsync-partial \
--numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc
#+end_example
This key will expire after 15 mn of the last recovery.
*** as a standard backup account
With a standard backup account, you can log on as =rsync= user and
request without any arguments a recovery key. Indeed, every standard
backup account is tied to one backup identifier only. So the recover
key received will be for this backup identifier only.
You'll probably want to use the received key from another computer to
restore the backup for instance.
#+begin_example
$ ssh rsync@$RSYNC_BACKUP_TARGET request-recovery-key > /tmp/private_key
$ chmod 500 /tmp/private_key
$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \
-azvArH --delete --delete-excluded \
--partial --partial-dir .rsync-partial \
--numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc
#+end_example
* Troubleshooting
** Faking access from client
This should work:
#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \
/tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client1
#+end_src
** Direct ssh access should be refused
#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
"$RSYNC_BACKUP_TARGET"
#+end_src
** Wrong directory should be refused
#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \
/tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client2
#+end_src

10
rsync-backup-target/build/Dockerfile

@ -2,16 +2,18 @@ FROM alpine:3.9
MAINTAINER Valentin Lab <valentin.lab@kalysto.org> MAINTAINER Valentin Lab <valentin.lab@kalysto.org>
RUN apk add rsync sudo bash openssh-server
## coreutils is for ``date`` support of ``--rfc-3339=seconds`` argument.
## findutils is for ``find`` support of ``--newermt`` argument.
RUN apk add rsync sudo bash openssh-server coreutils findutils
RUN ssh-keygen -A RUN ssh-keygen -A
## New user/group rsync/rsync with home dir in /var/lib/rsync ## New user/group rsync/rsync with home dir in /var/lib/rsync
RUN mkdir -p /var/lib/rsync && \
RUN mkdir -p /var/lib/rsync /var/log/rsync && \
addgroup -S rsync && \ addgroup -S rsync && \
adduser -S rsync -h /var/lib/rsync -G rsync && \ adduser -S rsync -h /var/lib/rsync -G rsync && \
chown rsync:rsync /var/lib/rsync
chown rsync:rsync /var/lib/rsync /var/log/rsync
## Without this, account is concidered locked by SSH
## Without this, account is considered locked by SSH
RUN sed -ri 's/^rsync:!:/rsync:*NP*:/g' /etc/shadow RUN sed -ri 's/^rsync:!:/rsync:*NP*:/g' /etc/shadow
## Withouth this, force-command will not run ## Withouth this, force-command will not run

42
rsync-backup-target/build/entrypoint.sh

@ -12,18 +12,36 @@ RSYNC_HOME=/var/lib/rsync
mkdir -p "$RSYNC_HOME/.ssh" mkdir -p "$RSYNC_HOME/.ssh"
for f in "$KEYS"/*.pub; do
[ -e "$f" ] || continue
content=$(cat "$f")
ident="${f##*/}"
ident="${ident%.pub}"
if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then
echo "bad: '$ident'"
continue
fi
echo "command=\"/usr/local/sbin/ssh-cmd-validate \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $content"
done > "$RSYNC_HOME"/.ssh/authorized_keys
chown rsync:rsync -R "$RSYNC_HOME"/.ssh -R
if ! egrep '^[^:]+:x:101:101:' /etc/passwd; then
## Then it is a first run of this container, users
## need to be created. Notice that container will be
## re-created anew if user config was changed.
for user_dir in /etc/rsync/keys/admin/* /etc/rsync/keys/recover; do
[ -d "$user_dir" ] || continue
user="${user_dir##*/}"
[ "$user" != "rsync" ] || continue
adduser -S "$user" -h "$user_dir" -G rsync &&
chown "$user":rsync "$user_dir" || {
echo "Error: couldn't create user $user or chown '$user_dir'." >&2
exit 1
}
## Without this, account is considered locked by SSH
sed -ri "s/^$user:\!:/$user:*NP*:/g" /etc/shadow
## Without this, force-command will not run
sed -ri "s%^($user.*:)[^:]+$%\1/bin/bash%g" /etc/passwd
done
fi
log="/var/log/rsync/ssh-admin-cmd-validate.log"
touch "$log"
chown rsync:rsync "$log"
chmod g+rw "$log"
ssh-update-keys
## Give back PID 1 so that ssh can receive signals ## Give back PID 1 so that ssh can receive signals
exec /usr/sbin/sshd -D -e exec /usr/sbin/sshd -D -e

7
rsync-backup-target/build/src/etc/sudoers.d/recover

@ -0,0 +1,7 @@
## allow admin users to request a recovery key, this is really not
## sufficient, but the real check is done on the
## ``ssh-admin-cmd-validate`` side.
%rsync ALL=(root) NOPASSWD: /usr/local/sbin/request-recovery-key *
%rsync ALL=(root) NOPASSWD: /bin/touch /etc/rsync/keys/recover/*
%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-update-keys

3
rsync-backup-target/build/src/etc/sudoers.d/rsync

@ -2,3 +2,6 @@
## the real check is done on the ``ssh-cmd-validate`` side. ## the real check is done on the ``ssh-cmd-validate`` side.
rsync ALL=(root) NOPASSWD: /usr/bin/rsync --server * . /var/mirror/* rsync ALL=(root) NOPASSWD: /usr/bin/rsync --server * . /var/mirror/*
%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-key *
%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-update-keys

76
rsync-backup-target/build/src/usr/local/sbin/request-recovery-key

@ -0,0 +1,76 @@
#!/bin/bash
## Generate a short-lived "recovery" SSH key for a backup identifier.
##
## Usage: request-recovery-key LABEL IDENT
##
##   LABEL  admin account claiming the ident (empty for standard
##          backup accounts, in which case the claim check is skipped)
##   IDENT  backup identifier whose mirror the new key will grant
##          read access to
##
## The private key is printed on stdout and a copy is kept server-side
## under ${RECOVER_KEY_PATH}; its mtime drives the 15-minute expiry
## applied by ``ssh-update-keys``.

RSYNC_KEY_PATH=/etc/rsync/keys
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover

ANSI_ESC=$'\e['

NORMAL="${ANSI_ESC}0m"

GRAY="${ANSI_ESC}1;30m"
RED="${ANSI_ESC}1;31m"
GREEN="${ANSI_ESC}1;32m"
YELLOW="${ANSI_ESC}1;33m"
BLUE="${ANSI_ESC}1;34m"
PINK="${ANSI_ESC}1;35m"
CYAN="${ANSI_ESC}1;36m"
WHITE="${ANSI_ESC}1;37m"

DARKGRAY="${ANSI_ESC}0;30m"
DARKRED="${ANSI_ESC}0;31m"
DARKGREEN="${ANSI_ESC}0;32m"
DARKYELLOW="${ANSI_ESC}0;33m"
DARKBLUE="${ANSI_ESC}0;34m"
DARKPINK="${ANSI_ESC}0;35m"
DARKCYAN="${ANSI_ESC}0;36m"
DARKWHITE="${ANSI_ESC}0;37m"

## Print a freshly generated RSA private key on stdout.
## $1: comment to embed in the key pair.
ssh:mk-private-key() {
    local comment="$1"
    (
        tmpdir=$(mktemp -d)
        chmod go-rwx "$tmpdir"
        ## Bugfix: use the given comment; previous code referenced the
        ## undefined variables ``$service_name`` and ``$host``.
        ssh-keygen -t rsa -N "" -f "$tmpdir/rsync_rsa" -C "$comment" >/dev/null
        cat "$tmpdir/rsync_rsa"
        rm -rf "$tmpdir"
    )
}

## Print the md5 hex digest of stdin (digest only, filename stripped).
md5() {
    local md5
    md5=$(cat | md5sum)
    echo "${md5%% *}"
}

request-recovery-key() {
    local label="$1" ident="$2" key public_key
    ## Admin should have claimed the ident with at least one backup key
    if [ -n "$label" ] && ! [ -e "${RSYNC_KEY_PATH}/backup/$label/$ident.pub" ]; then
        echo "Error: Current admin '$label' has no ident '$ident' claimed." >&2
        return 1
    fi
    ## Generate keys until the private key's md5 is free to use as the
    ## server-side storage name.
    while true; do
        key=$(ssh:mk-private-key "recover@$ident")
        md5=$(printf "%s" "$key" | md5)
        [ -e "${RECOVER_KEY_PATH}/$md5" ] || break
    done
    mkdir -p "${RECOVER_KEY_PATH}"
    public_key=$(ssh-keygen -y -f <(printf "%s\n" "$key"))
    printf "%s %s\n" "$public_key" "recover@$ident" > "${RECOVER_KEY_PATH}/$md5.pub"
    touch "${RECOVER_KEY_PATH}/$md5"
    chmod go-rwx "${RECOVER_KEY_PATH}/$md5"
    ## Hand the private key to the caller while keeping a copy whose
    ## mtime marks the last use.
    printf "%s\n" "$key" | tee -a "${RECOVER_KEY_PATH}/$md5"
    ## Publish the new public key in rsync's authorized_keys.
    /usr/local/sbin/ssh-update-keys
}

request-recovery-key "$@"

106
rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate

@ -0,0 +1,106 @@
#!/bin/bash
## Note that the shebang is not used, but it's the login shell that
## will execute this command.
## Forced command for *admin* SSH keys (installed via authorized_keys):
## validates $SSH_ORIGINAL_COMMAND against a whitelist of ``ssh-key``
## and ``request-recovery-key`` invocations and dispatches them.
## $1 is the admin account label baked into the authorized_keys entry.
exname=$(basename "$0")
mkdir -p /var/log/rsync
LOG="/var/log/rsync/$exname.log"
## sshd sets SSH_CONNECTION to "CLIENT_IP CLIENT_PORT SERVER_IP
## SERVER_PORT"; the unquoted expansion splits it into an array on
## purpose.
ssh_connection=(${SSH_CONNECTION})
SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}"
## Append a timestamped, pid- and source-tagged message to $LOG.
log() {
printf "%s [%s] %s - %s\n" \
"$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \
>> "$LOG"
}
log "NEW ADMIN CONNECTION"
## The label must be present and restricted to safe characters: it is
## later used to build filesystem paths.
if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then
log "INVALID SETUP, ARG IS: '$1'"
echo "Your command has been rejected. Contact administrator."
exit 1
fi
label="$1"
## Log and refuse the incoming command, then terminate.
reject() {
log "REJECTED: $SSH_ORIGINAL_COMMAND"
# echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2
echo "Your command has been rejected and reported to sys admin." >&2
exit 1
}
## Refuse shell metacharacters wholesale before any pattern matching.
if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then
log "BAD CHARS DETECTED"
# echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2
reject
fi
## Whitelist dispatch: each branch matches one exact command shape and
## exec's the corresponding tool ("sudo" where root rights are needed).
if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key add ssh-rsa "[a-zA-Z0-9/+]+" "[a-zA-Z0-9._-]+"@"[a-zA-Z0-9._-]+""$ ]]; then
log "ACCEPTED: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
exec sudo /usr/local/sbin/ssh-key add "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key ls"$ ]]; then
log "ACCEPTED: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
exec /usr/local/sbin/ssh-key ls "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key rm "[a-zA-Z0-9._-]+$ ]]; then
log "ACCEPTED: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
exec sudo /usr/local/sbin/ssh-key rm "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key get-type "[a-zA-Z0-9._-]+$ ]]; then
log "ACCEPTED: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
exec sudo /usr/local/sbin/ssh-key get-type "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"request-recovery-key "[a-zA-Z0-9._-]+$ ]]; then
log "ACCEPTED: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
## NOTE: args start at index 1 here ("request-recovery-key IDENT"),
## unlike the ssh-key branches where index 2 skips "ssh-key SUBCMD".
exec sudo /usr/local/sbin/request-recovery-key "$label" "${ssh_args[@]:1}"
else
log "NOT MATCHING ANY ALLOWED COMMAND"
reject
fi
## For other commands, like `find` or `md5`, that could be used to
## challenge the backups and check that archive is actually
## functional, I would suggest to write a simple command that takes no
## arguments, so as to prevent allowing wildcards or suspicious
## contents. Letting `find` go through is dangerous for instance
## because of the `-exec`. And path traversal can be done also when
## allowing /my/path/* by using '..'. This is why a fixed purpose
## embedded executable will be much simpler to handle, and to be honest
## we don't need much more.

66
rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate

@ -5,30 +5,84 @@
exname=$(basename "$0") exname=$(basename "$0")
mkdir -p /var/log/rsync
LOG="/var/log/rsync/$exname.log"
ssh_connection=(${SSH_CONNECTION})
SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}"
log() {
printf "%s [%s] %s - %s\n" \
"$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \
>> "$LOG"
}
log "NEW BACKUP CONNECTION"
if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then
logger -t "$exname" "INVALID SETUP, ARG IS: '$1'"
log "INVALID SETUP, ARG IS: '$1'"
echo "Your command has been rejected. Contact administrator." echo "Your command has been rejected. Contact administrator."
exit 1 exit 1
fi fi
ident="$1"
log "IDENTIFIED AS $ident"
reject() { reject() {
logger -t "$exname" "REJECTED: $SSH_ORIGINAL_COMMAND"
log "REJECTED: $SSH_ORIGINAL_COMMAND"
# echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2 # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2
echo "Your command has been rejected and reported to sys admin." >&2 echo "Your command has been rejected and reported to sys admin." >&2
exit 1 exit 1
} }
sudo /usr/local/sbin/ssh-update-keys
if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then
log "BAD CHARS DETECTED"
# echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2 # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2
reject reject
fi fi
if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server -"[vloHgDtpArRzCeiLsfx\.]+(" --"[a-z-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$1"$ ]]; then
logger -t "$exname" "ACCEPTED: $SSH_ORIGINAL_COMMAND"
# echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
exec sudo $SSH_ORIGINAL_COMMAND
if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server -"[vnloHgDtpArRzCeiLsfx\.]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"$ ]]; then
log "ACCEPTED BACKUP COMMAND: $SSH_ORIGINAL_COMMAND"
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
exec sudo "${ssh_args[@]::3}" \
"--log-file=/var/log/rsync/target_$1_rsync.log" \
"--log-file-format=%i %o %f %l %b" \
"${ssh_args[@]:3}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server --sender -"[vnloHgDtpArRzCeiLsfx\.]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"(|/.*)$ ]]; then
## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
#read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
ssh_args=(${SSH_ORIGINAL_COMMAND})
last_arg="${ssh_args[@]: -1:1}"
if ! new_path=$(realpath "$last_arg" 2>/dev/null); then
log "FINAL PATH INVALID"
reject
fi
if [[ "$new_path" != "$last_arg" ]] &&
[[ "$new_path" != "/var/mirror/$ident/"* ]] &&
[[ "$new_path" != "/var/mirror/$ident" ]]; then
log "FINAL PATH SUSPICIOUS"
reject
fi
log "ACCEPTED RECOVER COMMAND: $SSH_ORIGINAL_COMMAND"
exec sudo "${ssh_args[@]}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"request-recovery-key"$ ]]; then
log "ACCEPTED RECOVERY KEY REQUEST: $SSH_ORIGINAL_COMMAND"
exec sudo /usr/local/sbin/request-recovery-key "" "$ident"
else else
log "REFUSED COMMAND AS IT DOESN'T MATCH ANY EXPECTED COMMAND"
reject reject
fi fi

152
rsync-backup-target/build/src/usr/local/sbin/ssh-key

@ -0,0 +1,152 @@
#!/bin/bash
## Manage per-admin backup public keys stored under
## ${RSYNC_KEY_PATH}/backup/LABEL/IDENT.pub
##
## Usage: ssh-key {add|rm|ls|get-type} LABEL [ARGS..]
##
## Each ``.pub`` file holds one ``ssh-rsa KEY comment@IDENT`` line.
## The file name (IDENT) doubles as the backup identifier, so both
## keys and idents must stay unique across all labels.

RSYNC_KEY_PATH=/etc/rsync/keys

ANSI_ESC=$'\e['

NORMAL="${ANSI_ESC}0m"

GRAY="${ANSI_ESC}1;30m"
RED="${ANSI_ESC}1;31m"
GREEN="${ANSI_ESC}1;32m"
YELLOW="${ANSI_ESC}1;33m"
BLUE="${ANSI_ESC}1;34m"
PINK="${ANSI_ESC}1;35m"
CYAN="${ANSI_ESC}1;36m"
WHITE="${ANSI_ESC}1;37m"

DARKGRAY="${ANSI_ESC}0;30m"
DARKRED="${ANSI_ESC}0;31m"
DARKGREEN="${ANSI_ESC}0;32m"
DARKYELLOW="${ANSI_ESC}0;33m"
DARKBLUE="${ANSI_ESC}0;34m"
DARKPINK="${ANSI_ESC}0;35m"
DARKCYAN="${ANSI_ESC}0;36m"
DARKWHITE="${ANSI_ESC}0;37m"

## List idents claimed by LABEL, with an excerpt of each key.
ssh-key-ls() {
    local label="$1" f content
    for f in "${RSYNC_KEY_PATH}"/backup/"$label"/*.pub; do
        [ -e "$f" ] || continue
        ident=${f##*/}
        ident=${ident%.pub}
        content=$(cat "$f")
        key=${content#* }
        key=${key% *}
        printf "${DARKGRAY}..${NORMAL}%24s ${DARKCYAN}%s${NORMAL}\n" "${key: -24}" "$ident"
    done
}

## Remove the key claimed by LABEL for IDENT, then refresh rsync's
## authorized_keys.
ssh-key-rm() {
    local label="$1" ident="$2" delete
    delete="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"
    if ! [ -e "$delete" ]; then
        echo "Error: key '$ident' not found." >&2
        return 1
    fi
    rm "$delete"
    /usr/local/sbin/ssh-update-keys
}

## Print the type part (text before '@') of the key's comment.
ssh-key-get-type() {
    local label="$1" ident="$2" key content commentary
    key="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"
    if ! [ -e "$key" ]; then
        echo "Error: key '$ident' not found." >&2
        return 1
    fi
    content=$(cat "$key") || return 1
    commentary=${content##* }
    printf "%s\n" "${commentary%%@*}"
}

## Claim IDENT (taken from the key comment, after '@') for LABEL with
## the given ssh-rsa key, enforcing key and ident uniqueness.
ssh-key-add() {
    local label="$1" type="$2" key="$3" email="$4" existing
    [ "$type" == "ssh-rsa" ] || {
        echo "Error: expecting ssh-rsa key type" >&2
        return 1
    }
    ## ident are unique by construction (they are struct keys)
    ## but keys need to be also unique
    content="$type $key $email"
    ident="${email##*@}"
    target="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"
    ## is key used already ? As key give access to a specified subdir,
    ## we need to make sure it is unique.
    for key_file in "${RSYNC_KEY_PATH}/backup/"*/*.pub; do
        [ -e "$key_file" ] || continue
        key_content=$(cat "$key_file")
        if [ "$type $key" == "${key_content% *}" ]; then
            if [ "$key_file" == "$target" ]; then
                echo "Provided key already present for '$ident'." >&2
                return 0
            elif [[ "$key_file" == "${RSYNC_KEY_PATH}/"*"/$label/"*.pub ]]; then
                type=${key_file#"${RSYNC_KEY_PATH}/"}
                type=${type%"/$label/"*.pub}
                key_ident=${key_file##*/}
                key_ident=${key_ident%.pub}
                echo "Provided key already used as $type key for '$key_ident'." >&2
                return 1
            else
                olabel=${key_file#"${RSYNC_KEY_PATH}/"*/}
                olabel=${olabel%/*.pub}
                echo "Specified key is already used by '$olabel' account, please pick another one." >&2
                return 1
            fi
        fi
    done
    mkdir -p "${target%/*}"
    ## Bugfix: expand the ident glob into an array first — a bare
    ## ``[ -e GLOB ]`` errors out ("too many arguments") as soon as
    ## more than one file matches. Without nullglob an unmatched glob
    ## stays literal and the -e test is simply false, as before.
    existing=("${RSYNC_KEY_PATH}/"*"/"*"/$ident.pub")
    if [ -e "$target" ]; then
        echo "Replacing key for '$ident'." >&2
    elif [ -e "${existing[0]}" ]; then
        olabel="${existing[0]}"
        olabel=${olabel#"${RSYNC_KEY_PATH}/"*/}
        olabel=${olabel%/*.pub}
        echo "ident '$ident' is already reserved by '$olabel', please pick another one." >&2
        return 1
    fi
    echo "$content" > "$target"
    /usr/local/sbin/ssh-update-keys
}

case "$1" in
    "add")
        shift
        ssh-key-add "$@"
        ;;
    "rm")
        shift
        ssh-key-rm "$@"
        ;;
    "ls")
        shift
        ssh-key-ls "$@"
        ;;
    "get-type")
        shift
        ssh-key-get-type "$@"
        ;;
    *)
        ## Bugfix: report usage errors on stderr and exit non-zero
        ## (previously printed to stdout and exited 0).
        echo "Unknown command '$1'." >&2
        exit 1
        ;;
esac

97
rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate

@ -0,0 +1,97 @@
#!/bin/bash
## Note that the shebang is not used, but it's the login shell that
## will execute this command.
##
## Forced command for *recovery* SSH keys: only allows an rsync
## download ("--sender") of /var/mirror/IDENT, after checking that the
## recovery key (stored under its md5 name) has not expired.
##
## $1: md5 of the recovery key (its storage name in ${RECOVER_KEY_PATH})
## $2: backup identifier this key grants read access to

RSYNC_KEY_PATH=/etc/rsync/keys
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover

exname=$(basename "$0")

mkdir -p /var/log/rsync
LOG="/var/log/rsync/$exname.log"

## sshd sets SSH_CONNECTION to "CLIENT_IP CLIENT_PORT SERVER_IP
## SERVER_PORT"; the unquoted expansion splits it on purpose.
ssh_connection=(${SSH_CONNECTION})
SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}"

## Append a timestamped, pid- and source-tagged message to $LOG.
log() {
    printf "%s [%s] %s - %s\n" \
        "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \
        >> "$LOG"
}

log "NEW RECOVER CONNECTION"

if [ -z "$1" ] || ! [[ "$1" =~ ^[a-z0-9]+$ ]]; then
    log "INVALID SETUP, ARG 1 SHOULD BE MD5 AND IS: '$1'"
    echo "Your command has been rejected. Contact administrator."
    exit 1
fi
md5="$1"
log "RECOVER KEY $md5"

if [ -z "$2" ] || ! [[ "$2" =~ ^[a-zA-Z0-9._-]+$ ]]; then
    ## Bugfix: log the actual offending value ($2, not $1).
    log "INVALID SETUP, IDENT IS: '$2'"
    echo "Your command has been rejected. Contact administrator."
    exit 1
fi
ident="$2"
log "IDENTIFIED AS $ident"

## Log and refuse the incoming command, then terminate.
reject() {
    log "REJECTED: $SSH_ORIGINAL_COMMAND"
    # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2
    echo "Your command has been rejected and reported to sys admin." >&2
    exit 1
}

## Refuse shell metacharacters wholesale before any pattern matching.
if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then
    log "BAD CHARS DETECTED"
    # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2
    reject
fi

if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server --sender -"[vnldHogDtpArRze\.iLsfxC]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"(|/.*)$ ]]; then
    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})
    ## Guard against path traversal ('..') escaping the ident's mirror.
    last_arg="${ssh_args[@]: -1:1}"
    if ! new_path=$(realpath "$last_arg" 2>/dev/null); then
        log "FINAL PATH INVALID"
        reject
    fi
    if [[ "$new_path" != "$last_arg" ]] &&
           [[ "$new_path" != "/var/mirror/$ident/"* ]] &&
           [[ "$new_path" != "/var/mirror/$ident" ]]; then
        log "FINAL PATH SUSPICIOUS"
        reject
    fi
    ## Expire outdated recovery keys, then check ours still exists.
    sudo /usr/local/sbin/ssh-update-keys
    if ! [ -e "${RECOVER_KEY_PATH}/$md5" ]; then
        log "RECOVERY KEY $md5 JUST EXPIRED"
        reject
    fi
    log "ACCEPTED RECOVER COMMAND: $SSH_ORIGINAL_COMMAND"
    sudo "${ssh_args[@]}"
    errlvl="$?"
    ## Refresh mtimes so an active recovery keeps its key alive.
    for key_file in "${RECOVER_KEY_PATH}/$md5"{,.pub}; do
        [ -e "$key_file" ] || continue
        sudo touch "$key_file" ## Update modified time to keep key longer
    done
    exit "$errlvl"
else
    log "REFUSED COMMAND AS IT DOESN'T MATCH ANY EXPECTED COMMAND"
    reject
fi

68
rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys

@ -0,0 +1,68 @@
#!/bin/bash
## Rebuild the rsync user's authorized_keys from the backup and
## recovery public keys, expiring recovery keys unused for 15 minutes.
## Keep in mind possible race conditions as this script will be called
## from different place to update the access tokens.
##
## Code
##
RSYNC_KEY_PATH=/etc/rsync/keys
RSYNC_HOME=/var/lib/rsync
BACKUP_KEY_PATH=${RSYNC_KEY_PATH}/backup
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover
mkdir -p "$RSYNC_HOME/.ssh" "$RECOVER_KEY_PATH"
## delete old recovery keys
## (mtime is refreshed on each use, so this deletes keys 15 minutes
## after their last use; ``-newermt`` needs GNU findutils)
find "${RECOVER_KEY_PATH}" \
-maxdepth 1 -not -newermt "-15 minutes" \
-type f -delete
##
## New
##
## Build the replacement authorized_keys under a pid-suffixed
## temporary name, then move it in place (see atomicity note below).
pid=$$
new="$RSYNC_HOME"/.ssh/authorized_keys.tmp."$pid"
touch "$new"
for f in "$BACKUP_KEY_PATH"/*/*.pub "$RECOVER_KEY_PATH"/*.pub; do
[ -e "$f" ] || continue
content=$(cat "$f")
## The ident comes from the key comment (after '@') when the file
## looks like "type key comment@ident", else from the file name.
if [[ "$content" == *" "*" "*@* ]]; then
ident="${content##*@}"
else
ident="${f##*/}"
ident="${ident%.pub}"
fi
if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then
echo "bad: '$ident'" >&2
continue
fi
## Recovery keys are funneled through ssh-recover-cmd-validate
## (which needs the key's md5 name); backup keys through
## ssh-cmd-validate.
if [[ "$f" == "${RECOVER_KEY_PATH}"/*.pub ]]; then
basename=${f##*/}
basename=${basename%.pub}
cmd="/usr/local/sbin/ssh-recover-cmd-validate $basename"
else
cmd=/usr/local/sbin/ssh-cmd-validate
fi
echo "command=\"$cmd \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $content"
done >> "$new"
[ -e "$RSYNC_HOME"/.ssh/authorized_keys ] &&
mv "$RSYNC_HOME"/.ssh/authorized_keys{,.old}
## XXXvlab: Atomic operation. It's the last call to this instruction
## that will prevail. There are some very special cases where some
## added key would not be added as expected: for instance an older
## call to ``ssh-update-key``, if made before a specific public key
## file was added to directory, could take a longer time to reach this
## next instruction than a more recent call (that would be after
## the specific public key was added).
mv "$new" "$RSYNC_HOME"/.ssh/authorized_keys
chown rsync:rsync "$RSYNC_HOME"/.ssh -R

65
rsync-backup-target/hooks/init

@ -15,41 +15,64 @@ set -e
service_def=$(get_compose_service_def "$SERVICE_NAME") service_def=$(get_compose_service_def "$SERVICE_NAME")
keys=$(echo "$service_def" | shyaml -y get-value options.keys 2>/dev/null) || {
err "You must specify a ${WHITE}keys${NORMAL} struct to use this service"
admin_keys=$(echo "$service_def" | shyaml -y get-value options.admin 2>/dev/null) || {
err "You must specify a ${WHITE}admin${NORMAL} struct to use this service"
exit 1 exit 1
} }
[ "$(echo "$keys" | shyaml -y get-type 2>/dev/null)" == "struct" ] || {
err "Invalid value type for ${WHITE}keys${NORMAL}, please provide a struct"
[ "$(echo "$admin_keys" | shyaml -y get-type 2>/dev/null)" == "struct" ] || {
err "Invalid value type for ${WHITE}admin${NORMAL}, please provide a struct"
exit 1 exit 1
} }
local_path_key=/etc/rsync/keys
host_path_key="$SERVICE_CONFIGSTORE${local_path_key}"
key_nb=0
## ident are unique by construction (they are struct keys)
## but keys need to be also unique
declare -A keys
while read-0 ident key; do
if [ "${keys[$key]}" ]; then
err "Duplicate key: key for ident '$ident' is same as ident '${keys["$key"]}'."
exit 1
fi
rebuild-config() {
rm -rf "$SERVICE_CONFIGSTORE/etc/rsync/keys/admin"
mkdir -p "$host_path_key"
while read-0 ident keys; do
ident=$(e "$ident" | shyaml get-value)
if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then
err "Invalid identifier '$ident'," \ err "Invalid identifier '$ident'," \
"please use only alphanumerical char, dots, dash or underscores." "please use only alphanumerical char, dots, dash or underscores."
exit 1 exit 1
fi fi
debug "Creating access key for ${ident}" || true
echo "$key" | file_put "$host_path_key/${ident}.pub"
keys["$key"]="$ident"
done < <(echo "$keys" | shyaml key-values-0)
debug "Setting access keys for ${ident}"
[ "$(echo "$keys" | shyaml -y get-type 2>/dev/null)" == "sequence" ] || {
err "Invalid value type for ${WHITE}admin.$ident${NORMAL}, please provide a sequence"
echo " Received: '$keys'" >&2
exit 1
}
while read-0 key; do
echo "command=\"/usr/local/sbin/ssh-admin-cmd-validate \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $key"
done < <(echo "$keys" | shyaml get-values-0) | file_put "$host_path_key/$ident/.ssh/authorized_keys"
done < <(echo "$admin_keys" | shyaml -y key-values-0)
e "$control_users" > "$CONTROL_USERS_FILE"
}
local_path_key=/etc/rsync/keys/admin
host_path_key="$SERVICE_CONFIGSTORE${local_path_key}"
CONTROL_USERS_FILE="$SERVICE_DATASTORE/.control-pass"
## Was it already properly propagated to database ?
control_users=$(H "${admin_keys}" "$(declare -f "rebuild-config")")
init-config-add "\ init-config-add "\
$SERVICE_NAME: $SERVICE_NAME:
volumes: volumes:
- $host_path_key:$local_path_key:ro
- $host_path_key:$local_path_key
labels:
- compose.config_hash=$control_users
" "
if [ -e "$CONTROL_USERS_FILE" ] && [ "$control_users" == "$(cat "$CONTROL_USERS_FILE")" ]; then
exit 0
fi
rebuild-config

83
rsync-backup-target/hooks/log_rotate-relation-joined

@ -0,0 +1,83 @@
#!/bin/bash
## Should be executable N time in a row with same result.
## log-rotate relation hook: expose the rsync log directory to the
## logrotate companion service and generate its /etc/logrotate.d
## snippet.
## NOTE(review): relies on charm framework variables ($SERVICE_NAME,
## $SERVICE_DATASTORE, $CONFIGSTORE, $TARGET_SERVICE_NAME,
## $MASTER_TARGET_SERVICE_NAME, $MASTER_BASE_SERVICE_NAME) and helpers
## (docker_get_uid, relation-get, file_put, config-add) presumably
## provided via lib/common — confirm against the framework.
. lib/common
set -e
## uid of the in-container ``rsync`` user; rotated files are created
## with this owner.
uid=$(docker_get_uid "$SERVICE_NAME" "rsync")
LOGS=/var/log/rsync
mkdir -p "$SERVICE_DATASTORE/$LOGS"
touch "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
chown -v "$uid" "$SERVICE_DATASTORE/$LOGS" "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
## Number of rotations to keep; defaults to one year of weekly logs.
rotated_count=$(relation-get rotated-count 2>/dev/null) || true
rotated_count=${rotated_count:-52}
## XXXvlab: a lot of this intelligence should be moved away into ``logrotate`` charm
DST="$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/logrotate.d/$SERVICE_NAME"
file_put "$DST" <<EOF
/var/log/docker/$SERVICE_NAME/ssh-cmd-validate.log
{
weekly
missingok
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate $rotated_count
compress
delaycompress
notifempty
create 640 $uid
sharedscripts
}
/var/log/docker/$SERVICE_NAME/ssh-admin-cmd-validate.log
{
weekly
missingok
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate $rotated_count
compress
delaycompress
notifempty
create 660 $uid
sharedscripts
}
/var/log/docker/$SERVICE_NAME/target_*_rsync.log
{
weekly
missingok
dateext
dateyesterday
dateformat _%Y-%m-%d
extension .log
rotate $rotated_count
compress
delaycompress
notifempty
create 640
sharedscripts
}
EOF
## Mount the snippet into the logrotate service and share the log
## directory with both the logrotate and the rsync target containers.
config-add "\
services:
$MASTER_TARGET_SERVICE_NAME:
volumes:
- $DST:/etc/logrotate.d/docker-${SERVICE_NAME}:ro
- $SERVICE_DATASTORE$LOGS:/var/log/docker/$SERVICE_NAME:rw
$MASTER_BASE_SERVICE_NAME:
volumes:
- $SERVICE_DATASTORE$LOGS:$LOGS:rw
"

12
rsync-backup-target/metadata.yml

@ -1,3 +1,15 @@
description: Backup Rsync over SSH Target description: Backup Rsync over SSH Target
data-resources: data-resources:
- /etc/rsync/keys
- /var/mirror - /var/mirror
- /var/log/rsync
uses:
log-rotate:
#constraint: required | recommended | optional
#auto: pair | summon | none ## default: pair
constraint: required
auto: summon
solves:
unmanaged-logs: "in docker logs"
#default-options:

40
rsync-backup-target/resources/bin/compose-add-rsync-key

@ -43,23 +43,25 @@ check_valid_yaml() {
cmdline.spec.gnu cmdline.spec.gnu
cmdline.spec.reporting cmdline.spec.reporting
service_name=${SERVICE_NAME:-rsync-backup-target}
compose_file=${COMPOSE_FILE:-/etc/compose/compose.yml}
cmdline.spec::valued:--compose-file,-f:usage() {
echo "Compose file location. Defaults to '/etc/compose/compose.yml'"; }
cmdline.spec::valued:--compose-file,-f:run() { compose_file="$1"; }
cmdline.spec::valued:--service-name,-s:usage() {
echo "YAML service name in compose file to check for existence of key. Defaults to 'rsync-backup-target'"; }
cmdline.spec::valued:--service-name,-s:run() { service_name="$1"; }
cmdline.spec::cmd:__main__:run() { cmdline.spec::cmd:__main__:run() {
: :posarg: DOMAIN 'domain identifier' : :posarg: DOMAIN 'domain identifier'
: :posarg: SSH_PUBLIC_KEY 'ssh public key' : :posarg: SSH_PUBLIC_KEY 'ssh public key'
if ! existing_domains=$(shyaml keys "${service_name//./\\.}.options.keys" < "$compose_file"); then
: :optfla: --no-reload,-R 'Prevent reloading archiving server'
: :optval: --service-name,-s "YAML service name in compose
file to check for existence of key.
Defaults to 'rsync-backup-target'"
: :optval: --compose-file,-f "Compose file location. Defaults to
'/etc/compose/compose.yml'"
local service_name compose_file
service_name=${opt_service_name:-rsync-backup-target}
compose_file=${opt_compose_file:-/etc/compose/compose.yml}
if ! existing_domains=$(shyaml keys "${service_name//./\\.}.options.admin" < "$compose_file"); then
err "Couldn't query file '$compose_file' for keys of" \ err "Couldn't query file '$compose_file' for keys of" \
"service ${DARKYELLOW}${service_name}${NORMAL}." "service ${DARKYELLOW}${service_name}${NORMAL}."
exit 1 exit 1
@ -68,14 +70,14 @@ cmdline.spec::cmd:__main__:run() {
content=$(cat "$compose_file") content=$(cat "$compose_file")
if echo "$existing_domains" | grep "^${DOMAIN}$" >/dev/null 2>&1; then if echo "$existing_domains" | grep "^${DOMAIN}$" >/dev/null 2>&1; then
if ! prev_key=$(shyaml get-value "${service_name//./\\.}.options.keys.${DOMAIN//./\\.}" \
if ! prev_key=$(shyaml get-value "${service_name//./\\.}.options.admin.${DOMAIN//./\\.}" \
< "$compose_file"); then < "$compose_file"); then
err "Couldn't query file '$compose_file' for key of domain '$DOMAIN'." err "Couldn't query file '$compose_file' for key of domain '$DOMAIN'."
exit 1 exit 1
fi fi
if [ "${prev_key}" == "$SSH_PUBLIC_KEY" ]; then if [ "${prev_key}" == "$SSH_PUBLIC_KEY" ]; then
echo "Key was already setup."
info "Key is already setup correctly."
exit 0 exit 0
fi fi
@ -123,20 +125,26 @@ EOF
exit 1 exit 1
fi fi
echo "${WHITE}Applying these changes:${NORMAL}" echo "${WHITE}Applying these changes:${NORMAL}"
if type -p colordiff >/dev/null; then
colordiff -u "$compose_file" <(echo "$content")
else
echo "$diff" echo "$diff"
fi | egrep -v "^[^ ]*(---|\+\+\+)"
cp "$compose_file" "${compose_file}.old" cp "$compose_file" "${compose_file}.old"
echo "$content" > "$compose_file" echo "$content" > "$compose_file"
if [ -z "$opt_no_reload" ]; then
## reloading (could be much faster) ## reloading (could be much faster)
compose --debug down && compose --debug up compose --debug down && compose --debug up
if [ "$?" == 0 ]; then if [ "$?" == 0 ]; then
echo "Added key, and restarted service ${DARKYELLOW}$service_name${NORMAL}."
info "Added key, and restarted service ${DARKYELLOW}$service_name${NORMAL}."
else else
echo "something went wrong ! Should check the state of '$DOMAIN' !!"
err "something went wrong ! Should check the state of '$DOMAIN' !!"
exit 1 exit 1
fi fi
else
info "Added key, you'll need to restart service ${DARKYELLOW}$service_name${NORMAL}."
fi
} }

2
rsync-backup/build/Dockerfile

@ -6,7 +6,7 @@ MAINTAINER Valentin Lab <valentin.lab@kalysto.org>
RUN apk add bash rsync sudo openssh-client RUN apk add bash rsync sudo openssh-client
# RUN apt-get update && \ # RUN apt-get update && \
# DEBIAN_FRONTEND=noninteractive apt-get install --force-yes -y --no-install-recommends rsync sudo openssh-client && \
# DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends rsync sudo openssh-client && \
# apt-get clean && \ # apt-get clean && \
# rm -rf /var/lib/apt/lists/* # rm -rf /var/lib/apt/lists/*

38
rsync-backup/hooks/install.d/60-install.sh

@ -4,16 +4,25 @@
set -eux set -eux
[ "${DOMAIN}" ] || {
[ -n "${DOMAIN}" ] || {
echo "Error: you must set \$DOMAIN prior to running this script." >&2 echo "Error: you must set \$DOMAIN prior to running this script." >&2
exit 1 exit 1
} }
[ "${BACKUP_SERVER}" ] || {
[ -n "${BACKUP_SERVER}" ] || {
echo "Error: you must set \$BACKUP_SERVER prior to running this script." >&2 echo "Error: you must set \$BACKUP_SERVER prior to running this script." >&2
exit 1 exit 1
} }
KEY_BACKUP_ID=${KEY_BACKUP_ID:-rsync}
KEY_COMMENTARY="$KEY_BACKUP_ID@$DOMAIN"
MIRROR_DIR_PATH="${MIRROR_DIR_PATH:-$PWD/resources/bin/mirror-dir}"
[ -e "$MIRROR_DIR_PATH" ] || {
echo "Error: you must set \$MIRROR_DIR_PATH or be the root of the charm to run this script." >&2
exit 1
}
## rsync ## rsync
type -p rsync >/dev/null 2>&1 || apt-get install -y rsync </dev/null type -p rsync >/dev/null 2>&1 || apt-get install -y rsync </dev/null
@ -30,8 +39,17 @@ getent passwd rsync >/dev/null ||
chown rsync:rsync /var/lib/rsync chown rsync:rsync /var/lib/rsync
## rsync ssh key creation ## rsync ssh key creation
[ -e /var/lib/rsync/.ssh/id_rsa ] ||
su -c 'ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q -C rsync@'"$DOMAIN" - rsync
if [ -e /var/lib/rsync/.ssh/id_rsa.pub ]; then
## Mainly for update of old solution
content=$(cat /var/lib/rsync/.ssh/id_rsa.pub)
commentary=${content##* }
if [ "${commentary}" != "$KEY_COMMENTARY" ]; then
echo "Updating ssh key commentary from '${commentary}' to '$KEY_COMMENTARY'" >&2
sed -ri "s/ [^ ]+\$/ $KEY_COMMENTARY/" /var/lib/rsync/.ssh/id_rsa.pub
fi
else
su -c 'ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q -C '"$KEY_COMMENTARY" - rsync
fi
dest="$BACKUP_SERVER" dest="$BACKUP_SERVER"
if [[ "$dest" == *"/"* ]]; then if [[ "$dest" == *"/"* ]]; then
@ -45,9 +63,15 @@ fi
ssh-keyscan "${ssh_options[@]}" -H "${dest}" > /var/lib/rsync/.ssh/known_hosts ssh-keyscan "${ssh_options[@]}" -H "${dest}" > /var/lib/rsync/.ssh/known_hosts
apt-get install kal-shlib-process </dev/null
ln -sf "$PWD/resources/bin/mirror-dir" /usr/local/sbin/mirror-dir
apt-get install -y kal-shlib-{common,process,cmdline,array} </dev/null
case $(awk -Wversion 2>/dev/null || awk --version) in
"mawk 1.3.3"*)
## Not good, it is from 1996, and we still find it on Debian 10
apt-get install -y gawk </dev/null
;;
esac
ln -sf "$PWD/resources/bin/mirror-dir" /usr/local/sbin/mirror-dir
if ! [ -e /etc/mirror-dir/config.yml ]; then if ! [ -e /etc/mirror-dir/config.yml ]; then
@ -63,6 +87,6 @@ cat <<EOF > /etc/cron.d/mirror-dir
SHELL=/bin/bash SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
$((RANDOM % 60)) * * * * root mirror-dir -h "$DOMAIN" -d "$BACKUP_SERVER" -u rsync 2>&1 | logger -t mirror-dir
$((RANDOM % 60)) $((RANDOM % 4))-23/4 * * * root mirror-dir backup -q -h "$DOMAIN" -d "$BACKUP_SERVER"
EOF EOF

1
rsync-backup/hooks/schedule_command-relation-joined

@ -41,6 +41,7 @@ $schedule root lock $label -v -D -p 10 -k -c "\
-v \"$RSYNC_CONFIG_DIR:/etc/rsync\" \ -v \"$RSYNC_CONFIG_DIR:/etc/rsync\" \
-v \"$host_path_key:$local_path_key\" \ -v \"$host_path_key:$local_path_key\" \
-v \"$HOST_DATASTORE:/mnt/source\" \ -v \"$HOST_DATASTORE:/mnt/source\" \
-v \"$HOST_COMPOSE_YML_FILE:/mnt/source/compose.yml\" \
--network ${PROJECT_NAME}_default \ --network ${PROJECT_NAME}_default \
\"$DOCKER_BASE_IMAGE\" \ \"$DOCKER_BASE_IMAGE\" \
/mnt/source \"$target\"" 2>&1 | ts '\%F \%T' >> /var/log/cron/${label}_script.log /mnt/source \"$target\"" 2>&1 | ts '\%F \%T' >> /var/log/cron/${label}_script.log

2
rsync-backup/metadata.yml

@ -13,5 +13,5 @@ uses:
default-options: default-options:
## backup every day on random time ## backup every day on random time
schedule: !bash-stdout | schedule: !bash-stdout |
printf "%d %d * * *" "$((RANDOM % 60))" "$((RANDOM % 6))"
printf "%d %s * * *" "$((RANDOM % 60))" "$((RANDOM % 6))-23/6"

630
rsync-backup/resources/bin/mirror-dir

@ -1,5 +1,14 @@
#!/bin/bash #!/bin/bash
##
## Here's an example crontab:
##
## SHELL=/bin/sh
## PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
##
## 49 */2 * * * root mirror-dir run -d core-05.0k.io:10023 -u rsync /etc /home /opt/apps 2>&1 | logger -t mirror-dir
##
#:- #:-
. /etc/shlib . /etc/shlib
@ -8,189 +17,570 @@
include common include common
include parse include parse
include process include process
include cmdline
include array
depends shyaml lock depends shyaml lock
[ "$UID" != "0" ] && echo "You must be root." && exit 1
## ##
## Here's an example crontab:
##
## SHELL=/bin/sh
## PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
##
## 49 */2 * * * root mirror-dir -d core-05.0k.io:10023 -u rsync /etc /home /opt/apps 2>&1 | logger -t mirror-dir
## Functions
## ##
usage="usage: $exname -d DEST1 [-d DEST2 [...]] [-u USER] [DIR1 [DIR2 ...]]
MIRROR_DIR_LOG=/var/log/mirror-dir.log
MIRROR_DIR_REPORT_MAX_READ_LINE=1000000
R_DATE='[0-9]{4,4}-[01][0-9]-[0-3][0-9] [012][0-9]:[0-5][0-9]:[0-5][0-9][+-][01][0-9][0-5][0-9]'
mirror-dir:report() {
local s1 s2 s3 d1 d2 d3 host source sent received rate
while read s1 s2 d1 d2 host source sent received rate; do
s=$(date -d"$s1 $s2" --rfc-3339=seconds)
s_s=$(date -d"$s1 $s2" +%s)
d_s=$(date -d"$d1 $d2" +%s)
duration=$((d_s - s_s))
printf "%s %-15s %-30s | %s %s %s %10s\n" \
"$s" "$host" "$source" "$sent" "$received" "$rate" "$(print_duration "$duration")"
done < <(
tail "$MIRROR_DIR_LOG" -n "$MIRROR_DIR_REPORT_MAX_READ_LINE" |
egrep "^${R_DATE} (Starting|sent)" |
sed -r 's/\s*\(.*\)$//g
s/ (([0-9]{1,3},)*[0-9]{1,3})(\.[0-9]{2,2})? bytes(\/sec)?/:\1/g
s/,//g
s/ :([0-9]+)$/ rate:\1/g' |
grep -v "^--$" |
sed -r "/Starting/N;
{s/\n(${R_DATE} )(.*)sent/ \1 sent/g}" |
sed -r "s/^(${R_DATE} )Starting rsync: ([^ ]+) -> ([^ ]+) (${R_DATE} )/\1\4\3 \2/g
s/ +/ /g
s/ [a-z]+:/ /g" |
egrep "^${R_DATE} ${R_DATE} [^ ]+ /[^ ]+ [0-9]+ [0-9]+ [0-9]+$"
) |
numfmt --field=6,7 --to=iec-i --suffix=B --padding=8 |
numfmt --field=8 --to=iec-i --suffix=B/s --padding=10 |
sed -r 's/ \| / /g'
}
Preserve as much as possible the source structure, keeping hard-links, acl,
exact numerical uids and gids, and being able to resume in very large files.
Options:
DIR1 ... DIRn
Local directories that should be mirrored on destination(s).
mirror-dir:run() {
examples: /etc /home /var/backups
local hostname="$1" dests="$2" source_dirs
shift 2
If no directories are provided, the config file root
entries will be used all as destination to copy.
dests=($dests) ## individual dests can't use any space-like separators
source_dirs=("$@")
-d DESTn
Can be repeated. Specifies host destination towards which
files will be send. Note that you can specify port number after
a colon and a bandwidth limit for rsync after a '/'.
dest_path=/var/mirror/$hostname
state_dir=/var/run/mirror-dir
mkdir -p "$state_dir"
rsync_options=(
${RSYNC_OPTIONS:-} --stats --out-format='%i %o %f %l %b')
ssh_options=(${SSH_OPTIONS:--o StrictHostKeyChecking=no})
examples: -d liszt.musicalta:10022 -d 10.8.0.19/200
for dest in "${dests[@]}"; do
dest_rsync_options=("${rsync_options[@]}")
if [[ "$dest" == *"/"* ]]; then
dest_rsync_options+=("--bwlimit" "${dest##*/}")
dest="${dest%/*}"
fi
dest_for_session="$dest"
-u USER (default: 'backuppc')
for d in "${source_dirs[@]}"; do
Local AND destination user to log as at both ends to transfer file.
This local user need to have a NOPASSWD ssh login towards it's
account on destination. This destination account should have
full permissions access without passwd to write with rsync-server
in the destination directory.
current_rsync_options=("${dest_rsync_options[@]}")
-h STORE (default is taken of the hostname file)
session_id="$(echo "${dest_for_session}$d" | md5_compat)"
session_id="${session_id:1:8}"
Set the destination store, this is the name of the directory where
the files will all directories will be copied. Beware ! if 2 hosts
use the same store, this means they'll conflictingly update the
same destination directory. Only use this if you know what you
are doing.
if [[ "$dest" == *":"* ]]; then
ssh_options+=("-p" "${dest#*:}")
dest="${dest%%:*}"
fi
"
dirpath="$(dirname "$d")"
if [ "$dirpath" == "/" ]; then
dir="/$(basename "$d")"
else
dir="$dirpath/$(basename "$d")"
fi
dests=()
source_dirs=()
hostname=
while [ "$#" != 0 ]; do
case "$1" in
"-d")
dests+=("$2")
shift
[ -d "$dir" ] || {
warn "ignoring '$dir' as it is not existing."
continue
}
lock_label=$exname-$hostname-${session_id}
tmp_exclude_patterns=/tmp/${lock_label}.exclude_patterns.tmp
## Adding the base of the dir if required... seems necessary with
## the rsync option that replicate the full path.
has_exclude_pattern=
while read-0 exclude_dir; do
if [ -z "$has_exclude_pattern" ]; then
echo "Adding exclude patterns for source '$dir':" >&2
has_exclude_pattern=1
fi
if [[ "$exclude_dir" == "/"* ]]; then
exclude_dir="$dir${exclude_dir}"
fi
echo " - $exclude_dir" >&2
p0 "$exclude_dir"
done < <(get_exclude_patterns "$dir") > "$tmp_exclude_patterns"
if [ -n "$has_exclude_pattern" ]; then
current_rsync_options+=("-0" "--exclude-from"="$tmp_exclude_patterns")
else
echo "No exclude patterns for '$dir'."
fi
echo ---------------------------------
echo "Starting rsync: $d -> $dest ($(date))"
cmd=(
nice -n 15 \
rsync "${current_rsync_options[@]}" -azvARH \
-e "sudo -u $user ssh ${ssh_options[*]}" \
--delete --delete-excluded \
--partial --partial-dir .rsync-partial \
--numeric-ids "$dir/" "$user@$dest":"$dest_path"
)
echo "${cmd[@]}"
start="$SECONDS"
retry=1
errlvls=()
while true; do
lock "$lock_label" -v -D -k -- "${cmd[@]}"
errlvl="$?"
case "$errlvl" in
20) ## Received SIGUSR1, SIGINTT
echo "!! Rsync received SIGUSR1 or SIGINT."
echo " .. Full interruption while $d -> $dest and after $((SECONDS - start))s"
append_trim "${state_dir}/${session_id}-fail" \
"$dest $d $((SECONDS - start)) signal SIGUSR1, SIGINT or SIGHUP"
break 2
;; ;;
"-h")
hostname="$2"
shift
137|143) ## killed SIGKILL, SIGTERM
echo "!! Rsync received $(kill -l "$errlvl")"
echo " .. Full interruption while $d -> $dest and after $((SECONDS - start))s"
append_trim "${state_dir}/${session_id}-fail" \
"$dest $d $((SECONDS - start)) signal: $(kill -l "$errlvl")"
break 2
;; ;;
"-u")
user="$2"
shift
0)
echo "Rsync finished with success $d -> $dest in $((SECONDS - start))s"
append_trim "${state_dir}/${session_id}-success" \
"$dest $d $((SECONDS - start)) OK"
break
;; ;;
*) *)
source_dirs+=("$1")
errlvls+=("$errlvl")
echo "!! Rsync failed with an errorlevel $errlvl after $((SECONDS - start))s since start."
if [ "$retry" -lt 3 ]; then
echo "!! Triggering a retry ($((++retry))/3)"
continue
else
echo "!! Tried 3 times, bailing out."
echo " .. interruption of $d -> $dest after $((SECONDS - start))s"
append_trim "${state_dir}/${session_id}-fail" \
"$dest $d $((SECONDS - start))" \
"Failed after 3 retries (errorlevels: ${errlvls[@]})"
break
fi
;; ;;
esac esac
done
if [ -n "$has_exclude_pattern" ]; then
rm -fv "$tmp_exclude_patterns"
fi
done
done
}
get_exclude_patterns() {
local dir="$1"
[ -e "$config_file" ] || return
cat "$config_file" | shyaml get-values-0 "${dir//.\\./}.exclude" 2>/dev/null
}
append_trim() {
local f="$1"
shift shift
done
e "$(date --rfc-3339=s) $*"$'\n' >> "$f" &&
tail -n 5000 "$f" > "$f".tmp &&
mv "$f"{.tmp,}
}
log_tee() { tee -a "$MIRROR_DIR_LOG"; }
log_file() { cat >> "$MIRROR_DIR_LOG"; }
if test -z "$hostname"; then
hostname=$(hostname)
fi
if test -z "$hostname"; then
die "Couldn't figure a valid hostname. Please specify one with \`\`-h STORENAME\`\`."
fi
get_ids() {
local session_id id_done
declare -A id_done
for file in "$state_dir"/*{-fail,-success}; do
session_id=${file%-*}
[ "${id_done["$session_id"]}" ] && continue
id_done["$session_id"]=1
echo "${session_id##*/}"
done
}
user=${user:-backuppc}
dest_path=/var/mirror/$hostname
mirror-dir:_get_sources() {
local DIR=("$@")
config_file="/etc/$exname/config.yml"
config_file="/etc/$exname/config.yml"
if [ "${#source_dirs[@]}" == 0 ]; then
if [ "${#DIR[@]}" == 0 ]; then
if [ -e "$config_file" ]; then if [ -e "$config_file" ]; then
echo "No source provided on command line.. "
echo " ..so reading '$config_file' for default sources..."
source_dirs=($(eval echo $(shyaml get-values default.sources < "$config_file")))
info "No source provided on command line," \
"reading '$config_file' for default sources"
DIR=($(eval echo $(shyaml get-values default.sources < "$config_file")))
fi fi
if [ "${#source_dirs[@]}" == 0 ]; then
fi
array_values_to_stdin DIR
}
[[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true
version=0.1
desc='Manage mirroring of local directory to distant hosts'
help=""
##
## Code
##
cmdline.spec.gnu
cmdline.spec.gnu backup
cmdline.spec:backup:valued:-d,--dest:run() {
dests+=("$1")
}
dests=()
cmdline.spec::cmd:backup:run() {
# usage="usage: $exname -d DEST1 [-d DEST2 [...]] [-u USER] [DIR1 [DIR2 ...]]
# Preserve as much as possible the source structure, keeping hard-links, acl,
# exact numerical uids and gids, and being able to resume in very large files.
# "
: :posarg: [DIR...] 'Local directories that should be mirrored
on destination(s).
Examples: /etc /home /var/backups
If no directories are provided, the config
file root entries will be used all as
destination to copy.'
: :optval: -d,--dest 'Can be repeated. Specifies host
destination towards which files will be
send. Note that you can specify port
number after a colon and a bandwidth limit
for rsync after a '/'.
Examples: -d liszt.musicalta:10022
-d 10.8.0.19/200'
: :optval: -u,--user "(default: 'rsync')
Local AND destination system user to log
as at both ends to transfer file. This
local user need to have a no password ssh
login to it's own account on destination.
This destination account should have full
permissions access without passwd to write
with rsync-server in the destination
directory."
: :optval: -h,--hostname "(default is taken of the hostname file)
Set the destination store, this is the
name of the directory where the files
will all directories will be copied.
Beware ! if 2 hosts use the same store,
this means they'll conflictingly update
the same destination directory. Only
use this if you know what you are
doing."
: :optfla: -q,--quiet "Prevent output on stderr. Please note that
output is always written in log file."
[ "$UID" != "0" ] && echo "You must be root." && exit 1
[ -n "$opt_hostname" ] || opt_hostname=$(hostname)
if [ -n "$opt_quiet" ]; then
log_facility=log_file
else
log_facility=log_tee
fi
if [ -z "$opt_hostname" ]; then
err "Couldn't figure a valid hostname. Please specify one with \`\`-h STORENAME\`\`."
return 1
fi
user=${opt_user:-rsync}
config_file="/etc/$exname/config.yml"
array_read-0 DIR < <(
{
{
mirror-dir:_get_sources "${DIR[@]}"
} 3>&1 1>&2 2>&3 | "$log_facility"
} 3>&1 1>&2 2>&3
)
if [ "${#DIR[@]}" == 0 ]; then
err "You must specify at least one source directory to mirror" \ err "You must specify at least one source directory to mirror" \
"on command line (or in a config file)." "on command line (or in a config file)."
print_usage
echo "$usage" >&2
exit 1 exit 1
fi fi
fi
echo "Sources directories are: ${source_dirs[@]}"
info "Source directories are: ${DIR[@]}" 2>&1 | "$log_facility"
if [ "${#dests[@]}" == 0 ]; then
err "You must specify at least a destination."
print_usage
exit 1
fi
if [ "${#dests[@]}" == 0 ]; then
err "You must specify at least a destination (using \`\`-d\`\` or \`\`--dest\`\`)."
echo "$usage" >&2
return 1
fi
rsync_options=(${RSYNC_OPTIONS:-})
ssh_options=(${SSH_OPTIONS:-})
## XXXvlab: note that we use here a special version of awk supporting
## ``strftime``. This is only to prefix a date to the logs. Yes, we know
## about ``--out-format`` and its ``%t`` which would be ideal, but it
## doesn't output proper UTC time (it is system time, no timezone info).
mirror-dir:run "$opt_hostname" "${dests[*]}" "${DIR[@]}" 2>&1 |
awk -W interactive '{ print strftime("%Y-%m-%d %H:%M:%S%z"), $0 }' |
"$log_facility"
get_exclude_patterns() {
local dir="$1"
[ -e "$config_file" ] || return
cat "$config_file" | shyaml get-values-0 "$(echo "$dir" | sed -r 's%\.%\\.%g').exclude"
} }
for dest in "${dests[@]}"; do
for d in "${source_dirs[@]}"; do
current_rsync_options=("${rsync_options[@]}")
cmdline.spec.gnu report
cmdline.spec::cmd:report:run() {
mirror-dir:report
}
cmdline.spec:check:valued:-d,--dest:run() {
dests+=("$1")
}
cmdline.spec.gnu check
cmdline.spec::cmd:check:run() {
# usage="usage: $exname -d DEST1 [-d DEST2 [...]] [DIR1 [DIR2 ...]]
# Checks that mirror-dir did it's job. Will send an email if not.
# "
: :posarg: [DIR...] 'Local directories that should be mirrored
on destination(s).
Examples: /etc /home /var/backups
If no directories are provided, the config
file root entries will be used all as
destination to copy.'
: :optval: -d,--dest 'Can be repeated. Specifies host
destination towards which files will be
send. Note that you can specify port
number after a colon and a bandwidth limit
for rsync after a '/'.
Examples: -d liszt.musicalta:10022
-d 10.8.0.19/200'
: :optval: -n,--time-spec "Give a full English time spec about how
old the last full run of rsync should
be at most. Defaults to '12 hours'.
Examples: -n '12 hours'
-n '1 day'"
: :optfla: -m,--mail-alert "Send alert via email. This is intended to
use in cron."
[ "$UID" != "0" ] && echo "You must be root." && exit 1
if [ "${#dests[@]}" == 0 ]; then
err "You must specify at least a destination (using \`\`-d\`\` or \`\`--dest\`\`)."
echo "$usage" >&2
return 1
fi
if [ -n "$opt_mail_alert" ]; then
CHECK_DEFAULT_SOURCE=/etc/default/alerting
[ -f "$CHECK_DEFAULT_SOURCE" ] && . "$CHECK_DEFAULT_SOURCE"
if [ "${#MAIL_DESTS[@]}" == 0 ]; then
echo "You must set at least one recipient destination for mails." >&2
echo " You can do that in '$CHECK_DEFAULT_SOURCE', using the variable" >&2
echo " '\$MAIL_DESTS'. Note this is a bash array variable." >&2
exit 1
fi
fi
array_read-0 DIR < <(mirror-dir:_get_sources "${DIR[@]}")
if [ "${#DIR[@]}" == 0 ]; then
err "You must specify at least one source directory to mirror" \
"on command line (or in a config file)."
echo "$usage" >&2
exit 1
fi
time_spec="${opt_time_spec:-12 hours}"
state_dir=/var/run/mirror-dir
## Getting max string length of source
dir_max_len=0
for d in "${DIR[@]}"; do
[ "$dir_max_len" -lt "${#d}" ] &&
dir_max_len="${#d}"
done
## Getting max string length of dests
dest_max_len=0
for d in "${dests[@]}"; do
[ "$dest_max_len" -lt "${#d}" ] &&
dest_max_len="${#d}"
done
declare -A sessions=()
bad_sessions=()
msg=()
for dest in "${dests[@]}"; do
if [[ "$dest" == *"/"* ]]; then if [[ "$dest" == *"/"* ]]; then
current_rsync_options+=("--bwlimit" "${dest##*/}") current_rsync_options+=("--bwlimit" "${dest##*/}")
dest="${dest%/*}" dest="${dest%/*}"
fi fi
if [[ "$dest" == *":"* ]]; then
ssh_options+=("-p" "${dest#*:}")
dest="${dest%%:*}"
for d in "${DIR[@]}"; do
session_id="$(echo "$dest$d" | md5_compat)"
session_id="${session_id:1:8}"
sessions["$session_id"]="$dest $d"
f=$(find "$state_dir" \
-maxdepth 1 -newermt "-$time_spec" \
-type f -name "${session_id}-success")
if [ -z "$f" ]; then
if [ -e "$state_dir/${session_id}-success" ]; then
msg+=("$(printf "%-${dest_max_len}s %-${dir_max_len}s last full sync %s" \
"$dest" "$d" \
"$(stat -c %y "$state_dir/${session_id}-success" |
sed -r 's/\.[0-9]{9,9} / /g')")")
else
msg+=("$(printf "%-${dest_max_len}s %-${dir_max_len}s never finished yet" \
"$dest" "$d")")
fi
bad_sessions+=("$session_id")
fi fi
done
done
dirpath="$(dirname "$d")"
if [ "$dirpath" == "/" ]; then
dir="/$(basename "$d")"
[ "${#msg[@]}" == 0 ] && return 0
if [ -z "$opt_mail_alert" ]; then
echo
echo "${DARKRED}These destination/source directory were" \
"last synced more than $time_spec ago:${NORMAL}"
for m in "${msg[@]}"; do
printf " %s\n" "$m"
done
echo
echo "${DARKRED}Last failed logs:${NORMAL}"
for m in "${bad_sessions[@]}"; do
if [ -e "${state_dir}"/$m-fail ]; then
echo " ${sessions[$m]}:"
tail -n 5 "${state_dir}"/$m-fail | cut -f 1,2,5- -d " " | sed -r "s/^/ /g"
echo
else else
dir="$dirpath/$(basename "$d")"
echo " ${sessions[$m]}: no fail log available"
fi
done
return 1
fi fi
[ -d "$dir" ] || {
warn "ignoring '$dir' as it is not existing."
continue
}
lock_label=$exname-$hostname-$(echo "$dest" | md5_compat | cut -f 1 -d " ")
##
## Mail
##
exclude_patterns="$(get_exclude_patterns "$dir")"
if [ "${#msg[@]}" != 0 ]; then
tmp_exclude_patterns=/tmp/${lock_label}.$(echo "$d" | md5_compat | cut -f 1 -d " ").exclude_patterns.tmp
if [ "$exclude_patterns" ]; then
echo "Adding exclude patterns..."
cat <<EOF | mail -s "[$(hostname)] mirror backup failing" "${MAIL_DESTS[@]}"
Hi,
## Adding the base of the dir if required... seems necessary with
## the rsync option that replicate the full path.
while read-0 exclude_dir; do
if [[ "$exclude_dir" == "/"* ]]; then
echo -en "$dir""$(echo "$exclude_dir" | cut -c 1-)\0"
Some configured mirroring targets have not finished gracefully in
the last $time_spec. Please see for yourself:
$(
for m in "${msg[@]}"; do
echo " $m"
done
)
You might want to find these following information of some use:
$(
for m in "${bad_sessions[@]}"; do
if [ -e "${state_dir}"/$m-fail ]; then
echo " ${sessions[$m]}:"
tail -n 5 "${state_dir}"/$m-fail | cut -f 1,2,5- -d " " | sed -r "s/^/ /g"
echo
else else
echo -en "$exclude_dir\0"
echo " ${sessions[$m]}: no fail log available"
fi fi
done < <(get_exclude_patterns "$dir") > "$tmp_exclude_patterns"
cat "$tmp_exclude_patterns" | xargs -0 -n 1 echo
current_rsync_options=("-0" "--exclude-from"="$tmp_exclude_patterns" "${current_rsync_options[@]}")
else
echo "No exclude patterns for '$dir'."
done
)
Hoping all this will help you sort out the issue...
Yours sincerly,
--
mirror-dir-check
PS: You received this email because your email is listed in
\$MAIL_DESTS of '$CHECK_DEFAULT_SOURCE' of '$(hostname)'
(also known as $(cat /etc/mailname)).
EOF
fi fi
echo ---------------------------------
date
echo nice -n 15 rsync "${current_rsync_options[@]}" -azvARH -e "'sudo -u $user ssh ${ssh_options[*]}'" --delete --delete-excluded --partial --partial-dir .rsync-partial --numeric-ids "$dir/" "$user@$dest":"$dest_path"
}
lock "$lock_label" -v -D -k -- \
nice -n 15 \
rsync "${current_rsync_options[@]}" -azvARH \
-e "sudo -u $user ssh ${ssh_options[*]}" \
--delete --delete-excluded --partial --partial-dir .rsync-partial \
--numeric-ids "$dir/" "$user@$dest":"$dest_path"
rm -fv "$tmp_exclude_patterns"
done
done
cmdline::parse "$@"

12
searx/hooks/web_proxy-relation-joined

@ -0,0 +1,12 @@
#!/bin/bash
set -e
URL=$(relation-get url) || exit 1
config-add "\
services:
$MASTER_BASE_SERVICE_NAME:
environment:
BASE_URL: $URL
"

Some files were not shown because too many files changed in this diff

Loading…
Cancel
Save