diff --git a/apache/build/Dockerfile b/apache/build/Dockerfile index 8792c74c..00beb169 100644 --- a/apache/build/Dockerfile +++ b/apache/build/Dockerfile @@ -1,7 +1,7 @@ ## copy of 'php:7.3-apache' image ## XXXvlab: will need to move towards separate apache and php, so that nginx can ## replace apache in all conf. -FROM docker.0k.io/php:7.3-apache-1 +FROM docker.0k.io/php:7.4.13-apache ## Limesurvey # RUN apt-get update && \ @@ -20,7 +20,7 @@ FROM docker.0k.io/php:7.3-apache-1 ## Framadate RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y libicu-dev libpq-dev libxml2-dev && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y libonig-dev libicu-dev libpq-dev libxml2-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ docker-php-ext-install mbstring intl xml pdo_pgsql @@ -30,7 +30,7 @@ RUN apt-get update && \ DEBIAN_FRONTEND=noninteractive apt-get install -y libexif-dev libexif12 libfreetype6-dev libjpeg62-turbo-dev libpng-dev && \ apt-get clean && \ rm -rf /var/lib/apt/lists/* && \ - docker-php-ext-configure gd --with-freetype-dir=/usr/include/ --with-jpeg-dir=/usr/include/ && \ + docker-php-ext-configure gd --with-freetype=/usr/include/ --with-jpeg=/usr/include/ && \ docker-php-ext-install exif gd pdo_mysql mysqli ## gogocarto @@ -39,13 +39,13 @@ RUN apt-get update && \ libbz2-dev libc-client-dev libcurl4-openssl-dev libfreetype6-dev \ libgd-dev libicu-dev libkrb5-dev libmagickcore-dev libmagickwand-dev \ libmcrypt-dev libmemcached-dev libtidy-dev libxml2-dev libxslt-dev \ - libz-dev libzip-dev" && \ + libz-dev libzip-dev libonig-dev" && \ DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends \ bzip2 cron g++ gettext git gnupg imagemagick libfreetype6 libgd3 \ libmcrypt4 libmemcached11 libmemcachedutil2 libsodium23 libtidy5deb1 \ libxml2 libxslt1.1 libzip4 nano openssl unzip ${BUILD_PACKAGES} && \ - docker-php-ext-configure gd --with-freetype-dir=/usr/include/ \ - --with-jpeg-dir=/usr/include/ && \ + 
docker-php-ext-configure gd --with-freetype=/usr/include/ \ + --with-jpeg=/usr/include/ && \ docker-php-ext-configure imap --with-kerberos --with-imap-ssl && \ docker-php-ext-configure hash --with-mhash && \ docker-php-ext-install bcmath bz2 calendar dba curl exif gd gettext \ @@ -63,14 +63,11 @@ RUN apt-get update && \ rm -rf /var/lib/apt/lists/* - - ## XXXvlab: could load these in 'entrypoint.sh' to be more dynamic RUN a2enmod headers proxy_http rewrite ssl proxy_wstunnel http2 proxy_connect ## Can remove this when SSL certificate are all valid ones -RUN apt-get update && apt-get install -y --force-yes ssl-cert - +RUN apt-get update && apt-get install -y ssl-cert COPY entrypoint.sh /entrypoint.sh diff --git a/bitwarden/metadata.yml b/bitwarden/metadata.yml index ecdc0a09..85d1599b 100644 --- a/bitwarden/metadata.yml +++ b/bitwarden/metadata.yml @@ -1,6 +1,6 @@ description: Bitwarden Server -#docker-image: bitwardenrs/server:1.17.0-alpine -docker-image: docker.0k.io/bitwarden:1.17.0 +#docker-image: bitwardenrs/server:1.22.2-alpine +docker-image: docker.0k.io/bitwarden:1.22.2 data-resources: - /data uses: diff --git a/codimd/hooks/init b/codimd/hooks/init deleted file mode 100755 index 56997ce6..00000000 --- a/codimd/hooks/init +++ /dev/null @@ -1,12 +0,0 @@ -#!/bin/bash - -init-config-add "\ -$SERVICE_NAME: - environment: - CMD_USECDN: \"false\" -" - -## ``codimd`` create uploads folder with wrong permission - -mkdir -p "$SERVICE_DATASTORE/home/hackmd/app/public/uploads" -chown -R 1500:1500 "$SERVICE_DATASTORE/home/hackmd/app/public/" diff --git a/cron/build/Dockerfile b/cron/build/Dockerfile index c103d749..7e7c5d12 100644 --- a/cron/build/Dockerfile +++ b/cron/build/Dockerfile @@ -1,7 +1,7 @@ -FROM debian:jessie +FROM docker.0k.io/debian:jessie RUN apt-get update && \ - DEBIAN_FRONTEND=noninteractive apt-get install -y --force-yes cron moreutils && \ + DEBIAN_FRONTEND=noninteractive apt-get install -y cron moreutils && \ apt-get clean && \ rm -rf 
/var/lib/apt/lists/* diff --git a/cron/hooks/init b/cron/hooks/init index 69a9633a..9bb74100 100755 --- a/cron/hooks/init +++ b/cron/hooks/init @@ -19,6 +19,12 @@ fi exit 1 } +[ "$HOST_COMPOSE_LAUNCHER_OPTS" ] || { + err "\$HOST_COMPOSE_LAUNCHER_OPTS is expected to be set." + exit 1 +} + + [ "$COMPOSE_LAUNCHER_BIN" ] || { err "\$COMPOSE_LAUNCHER_BIN is expected to be set." exit 1 @@ -31,7 +37,7 @@ $CHARM_NAME: - /etc/timezone:/etc/timezone:ro - \$DOCKER_COMPOSE_PATH/bin/dc:/bin/dc:ro - $COMPOSE_LAUNCHER_BIN:/bin/compose:ro - - $COMPOSE_LAUNCHER_OPTS:$COMPOSE_LAUNCHER_OPTS:ro + - $HOST_COMPOSE_LAUNCHER_OPTS:$COMPOSE_LAUNCHER_OPTS:ro environment: TZ: $timezone COMPOSE_LAUNCHER_OPTS: $COMPOSE_LAUNCHER_OPTS diff --git a/cron/hooks/pre_deploy b/cron/hooks/pre_deploy new file mode 100755 index 00000000..23bc254c --- /dev/null +++ b/cron/hooks/pre_deploy @@ -0,0 +1,20 @@ +#!/bin/bash +## Should be executable N time in a row with same result. + +set -e + +cron_config_hash() { + debug "Adding config hash to enable recreating upon config change." 
+ config_hash=$({ + find "$SERVICE_CONFIGSTORE/etc/cron"{,.hourly,.weekly,.daily,.monthly} \ + -type f -exec md5sum {} \; + } | md5_compat) || exit 1 + init-config-add " +$MASTER_BASE_SERVICE_NAME: + labels: + - compose.config_hash=$config_hash +" +} + + +cron_config_hash || exit 1 diff --git a/cron/metadata.yml b/cron/metadata.yml index dc711938..131149a6 100644 --- a/cron/metadata.yml +++ b/cron/metadata.yml @@ -11,7 +11,9 @@ data-resources: host-resources: - /var/run/docker.sock provides: - schedule-command: + schedule-command: ## for one command + tech-dep: False + schedule-commands: ## for several commands tech-dep: False uses: ## optional diff --git a/cyclos/hooks/init b/cyclos/hooks/init index 2947a5ee..f2046f4a 100755 --- a/cyclos/hooks/init +++ b/cyclos/hooks/init @@ -40,7 +40,7 @@ done < <(array_values_to_stdin PROPS) init-config-add " $SERVICE_NAME: environment: - JAVA_OPTS: | + JAVA_OPTS: >- -Dcyclos.cors.origin=* -Dcyclos.header.remoteAddress=X-Forwarded-For -Dcyclos.header.protocol=X-Forwarded-Proto diff --git a/cyclos/hooks/pre_deploy b/cyclos/hooks/pre_deploy new file mode 100755 index 00000000..3b51a64f --- /dev/null +++ b/cyclos/hooks/pre_deploy @@ -0,0 +1,63 @@ +#!/bin/bash + +## +## Get domain in option of relation "web-proxy" +## + +## XXXvlab: there is a tiny lapse of time where database is not yet +## installed, and admin password is the default value. + + +. lib/common + +set -ex + + +admin_password=$(options-get admin-password 2>/dev/null ) || exit 1 + +CONTROL_PASSWORD_FILE="$SERVICE_DATASTORE/.control-pass" +## Was it already properly propagated to database ? +control_password=$(H "${admin_password}") +if ! [ -e "$CONTROL_PASSWORD_FILE" ] || [ "$control_password" != "$(cat "$CONTROL_PASSWORD_FILE")" ]; then + + hash="$(htpasswd -nbBC 10 USER "$admin_password" | cut -f 2- -d :)" || { + err "Couldn't generate hash for admin password." + exit 1 + } + + if ! 
sql < <(e " + UPDATE passwords SET value = '$hash' + WHERE user_id = 1 + AND status = 'ACTIVE' + AND password_type_id in ( + SELECT id FROM password_types + WHERE input_method = 'TEXT_BOX' + AND password_mode = 'MANUAL'); + "); then + debug "Failed to set password for admin users." + exit 1 + fi + mkdir -p "${CONTROL_PASSWORD_FILE%/*}" + e "$control_password" > "$CONTROL_PASSWORD_FILE" +fi + + + +url=$(named-relation-get "web-proxy" url) || exit 1 + +CONTROL_URL_FILE="$SERVICE_DATASTORE/.control-url" +## Was it already properly propagated to database ? +control_url=$(H "${url}") +if ! [ -e "$CONTROL_URL_FILE" ] || [ "$control_url" != "$(cat "$CONTROL_URL_FILE")" ]; then + ## In ``configurations`` table, columns login_url, logout_url, root_url + + if ! sql < <(e " + UPDATE configurations + SET + root_url = '$url' + "); then + debug "Failed to set password for admin users." + exit 1 + fi + e "$control_password" > "$CONTROL_URL_FILE" +fi diff --git a/cyclos/lib/common b/cyclos/lib/common index c127b1da..7ba451a5 100644 --- a/cyclos/lib/common +++ b/cyclos/lib/common @@ -1,4 +1,4 @@ -# -*- mode: bash -*- +# -*- mode: shell-script -*- CYCLOS_CONFIG="/usr/local/cyclos/WEB-INF/classes/cyclos.properties" @@ -12,3 +12,62 @@ cyclos:build() { docker cp "$container_id":"$CYCLOS_CONFIG" "$HOST_CYCLOS_CONFIG" && docker rm "$container_id" } + + +named-relation-get-target-service() { + local relation="$1" ts + if ! read-0 ts _ _ < <(get_service_relation "$SERVICE_NAME" "$relation"); then + err "Couldn't find relation ${DARKCYAN}$relation${NORMAL}." 
+ return 1 + fi + e "$ts" +} + + +## XXXvlab: Consider for inclusion in compose-core +relation-get-config() { + local relation="$1" ts relation_dir + + ts=$(named-relation-get-target-service "$relation") || return 1 + relation_dir=$(get_relation_data_dir "$SERVICE_NAME" "$ts" "$relation") || return 1 + cat "${relation_dir}/data" +} + + +named-relation-get() { + local relation="$1" key="$2" config + + config=$(relation-get-config "$relation") || return 1 + + e "$config" | shyaml get-value "$key" || { + err "Couldn't get ${WHITE}$key${NORMAL} value" \ + "in ${DARKCYAN}$relation${NORMAL} relation's data." + exit 1 + } +} + + +sql() { + ( + DBNAME="$(named-relation-get "postgres-database" dbname)" || exit 1 + ts=$(named-relation-get-target-service "postgres-database") || exit 1 + + export SERVICE_NAME="$ts" + export SERVICE_DATASTORE="$DATASTORE/$SERVICE_NAME" + DOCKER_BASE_IMAGE=$(service_base_docker_image "$SERVICE_NAME") + export DOCKER_BASE_IMAGE + + target_charm=$(get_service_charm "$ts") || exit 1 + target_charm_path=$(charm.get_dir "$target_charm") || exit 1 + + set +e + + . 
"$target_charm_path/lib/common" + + set -e + + ensure_db_docker_running + + ddb "$DBNAME" + ) +} \ No newline at end of file diff --git a/cyclos/metadata.yml b/cyclos/metadata.yml index 35a33b8d..5368b6dd 100644 --- a/cyclos/metadata.yml +++ b/cyclos/metadata.yml @@ -1,5 +1,5 @@ -## Based on cyclos/cyclos:4.13.2 -docker-image: docker.0k.io/cyclos:4.13.2 +## Based on cyclos/cyclos:4.14 +docker-image: docker.0k.io/cyclos:4.14.7 config-resources: - /usr/local/cyclos/WEB-INF/classes/cyclos.properties data-resources: @@ -24,6 +24,8 @@ uses: - earthdistance - postgis - unaccent + init-sql: !bash-stdout | + zcat "$BASE_CHARM_PATH/src/init.sql.gz" log-rotate: #constraint: required | recommended | optional #auto: pair | summon | none ## default: pair diff --git a/cyclos/src/init.sql.gz b/cyclos/src/init.sql.gz new file mode 100644 index 00000000..c2ae000e Binary files /dev/null and b/cyclos/src/init.sql.gz differ diff --git a/drone/metadata.yml b/drone/metadata.yml index a17110a4..ee98b64a 100644 --- a/drone/metadata.yml +++ b/drone/metadata.yml @@ -47,3 +47,17 @@ uses: proxy: "Public access" default-options: target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80 + backup: + constraint: recommended + auto: pair + solves: + backup: "Automatic regular backups of sql database" + default-options: + ## First pattern matching wins, no pattern matching includes. 
+ ## include-patterns are checked first, then exclude-patterns + ## Patterns rules: + ## - ending / for directory + ## - '*' authorized + ## - must start with a '/', will start from $SERVICE_DATASTORE + # include-patterns: + # - /var/backups/pg/ diff --git a/etherpad/README.org b/etherpad/README.org new file mode 100644 index 00000000..0d0101e4 --- /dev/null +++ b/etherpad/README.org @@ -0,0 +1,33 @@ +# -*- ispell-local-dictionary: "english" -*- + +#+TITLE: Etherpad Charm + +* Upgrade + +Based on https://github.com/ether/etherpad-lite , following: + +https://github.com/ether/etherpad-lite/blob/develop/doc/docker.md + +Used: + +#+begin_src sh +TAG=1.8.14 +git clone https://github.com/ether/etherpad-lite --depth 1 -b $TAG +docker build --build-arg INSTALL_SOFFICE=1 \ + --build-arg ETHERPAD_PLUGINS=" \ + ep_font_family ep_mammoth ep_comments_page ep_table_of_contents \ + ep_markdown ep_image_upload ep_spellcheck ep_headings2 ep_align \ + ep_who_did_what ep_what_have_i_missed ep_embedmedia \ + ep_openid_connect ep_rss ep_git_commit_saved_revision" \ + . -t docker.0k.io/etherpad:${TAG}-0k +docker push docker.0k.io/etherpad:${TAG}-0k +#+end_src + + +* Admin password + +We choose to NOT include admin panel as it allows to change settings +and install plugins but this will not allow to reproduce an install +easily. We can do this on the =compose.yml= side in a reproducible +manner. + diff --git a/etherpad/hooks/init b/etherpad/hooks/init new file mode 100755 index 00000000..7f2e43e3 --- /dev/null +++ b/etherpad/hooks/init @@ -0,0 +1,39 @@ +#!/bin/bash + +## Init is run on host +## For now it is run every time the script is launched, but +## it should be launched only once after build. 
+ +## Accessible variables are: +## - SERVICE_NAME Name of current service +## - DOCKER_BASE_IMAGE Base image from which this service might be built if any +## - SERVICE_DATASTORE Location on host of the DATASTORE of this service +## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service + + +. lib/common + +set -e + + +dirs=( + "$SERVICE_DATASTORE/var/lib/etherpad" +) + +uid_gid=($(docker_get_uid_gid "$SERVICE_NAME" "etherpad" "etherpad")) || { + err "Could not fetch uid/gid on image of service ${DARKYELLOW}$SERVICE_NAME${NORMAL}." + return 1 +} + +uid="${uid_gid[0]}" +gid="${uid_gid[1]}" +for dir in "${dirs[@]}"; do + mkdir -p "$dir" + find "$dir" \! -uid "$uid" -print0 | while read-0 f; do + chown -v "$uid" "$f" || return 1 + done + find "$dir" \! -gid "$gid" -print0 | while read-0 f; do + chgrp -v "$gid" "$f" || return 1 + done +done + diff --git a/etherpad/hooks/postgres_database-relation-joined b/etherpad/hooks/postgres_database-relation-joined new file mode 100755 index 00000000..9d725c93 --- /dev/null +++ b/etherpad/hooks/postgres_database-relation-joined @@ -0,0 +1,26 @@ +#!/bin/bash + +set -e + +PASSWORD="$(relation-get password)" +USER="$(relation-get user)" +DBNAME="$(relation-get dbname)" + +control=$(echo -en "$USER\0$DBNAME\0$PASSWORD\0$ADMIN_PASSWORD" | md5_compat) + +config-add "\ +services: + $MASTER_BASE_SERVICE_NAME: + environment: + DB_TYPE: postgres + DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\" + DB_NAME: \"$DBNAME\" + DB_PASS: \"$PASSWORD\" + DB_USER: \"$USER\" +" + +[ "$control" == "$(relation-get control 2>/dev/null)" ] && exit 0 + +relation-set control "$control" + +info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access." 
diff --git a/etherpad/metadata.yml b/etherpad/metadata.yml new file mode 100644 index 00000000..15c6a08d --- /dev/null +++ b/etherpad/metadata.yml @@ -0,0 +1,53 @@ +name: etherpad +summary: "Etherpad-lite server" +maintainer: "Valentin Lab " +inherit: base-0k +## Custom built from git 1.8.14 https://github.com/ether/etherpad-lite with +## build arg --build-arg INSTALL_SOFFICE=1 +docker-image: docker.0k.io/etherpad:1.8.14-soffice ## custom built from git m etherpad/etherpad +description: | + Etherpad-lite service. + +data-resources: + - /var/lib/etherpad + +docker-compose: + command: node src/node/server.js --apikey /var/lib/etherpad/APIKEY.txt + environment: + SOFFICE: '/usr/bin/soffice' + +uses: + postgres-database: + #constraint: required | recommended | optional + #auto: pair | summon | none ## default: pair + constraint: required + auto: summon + solves: + database: "main storage" + default-options: + extensions: + - unaccent + web-proxy: + #constraint: required | recommended | optional + #auto: pair | summon | none ## default: pair + constraint: recommended + auto: pair + solves: + proxy: "Public access" + default-options: + target: !var-expand ${MASTER_BASE_SERVICE_NAME}:9001 + + backup: + constraint: recommended + auto: pair + solves: + backup: "Automatic regular backup" + default-options: + ## First pattern matching wins, no pattern matching includes. + ## include-patterns are checked first, then exclude-patterns + ## Patterns rules: + ## - ending / for directory + ## - '*' authorized + ## - must start with a '/', will start from $SERVICE_DATASTORE + #exclude-patterns: + # - "/var/lib/odoo/sessions/" diff --git a/gitea/metadata.yml b/gitea/metadata.yml index 01dc1ed5..fdd514ff 100644 --- a/gitea/metadata.yml +++ b/gitea/metadata.yml @@ -1,7 +1,8 @@ description: "Gitea Server" maintainer: "Valentin Lab " ## XXXvlab: docker uses the 'build' directory or the 'image:' option here. 
-docker-image: docker.0k.io/gitea:1.0.0 +## based on gitea/gitea:1.14.2 +docker-image: docker.0k.io/gitea:1.14.2 docker-compose: ports: - "5022:22" diff --git a/gogocarto/README.org b/gogocarto/README.org index b633bc7e..96e51b34 100644 --- a/gogocarto/README.org +++ b/gogocarto/README.org @@ -15,35 +15,28 @@ far from being mature, we try here to get only the released part. We are talking of: [[https://gitlab.adullact.net/pixelhumain/GoGoCarto/-/blob/f3c10f16fc08b533ef44f1325fdb50f87fa73224/docs/installation_docker.md][gogocarto docker install documentation]] -** Intermediate docker image is cached on docker.0k.io if needed +** Updating -To rebuild a new version, I pushed the huge docker image of -=gogocarto= container that is required in the process (that contains -apache and all build/install tools). Note that this should not require -to be rebuild if anything important changes in the directory =docker/=. +This process will ensure to avoid rebuilding the gigantic intermediate +image needed (with apache and build/install tools). -Using: +You need to run this from the root of a code checkout of: +https://gitlab.adullact.net/pixelhumain/GoGoCarto #+begin_src sh docker_tree_hash=$(git rev-parse HEAD:docker) -docker tag docker_gogocarto docker.0k.io/gogocarto-builder:${docker_tree_hash} -docker push docker.0k.io/gogocarto-builder:${docker_tree_hash} +## Intermediate docker image is cached on docker.0k.io if needed +if ! 
docker pull docker.0k.io/gogocarto-builder:${docker_tree_hash}; then + make build && + docker tag docker_gogocarto docker.0k.io/gogocarto-builder:${docker_tree_hash} && + docker push docker.0k.io/gogocarto-builder:${docker_tree_hash} +else + docker tag docker.0k.io/gogocarto-builder:${docker_tree_hash} docker_gogocarto +fi +make up && +docker-compose -f docker/docker-compose.yml exec gogocarto make init #+end_src -So, if needed, it can be pulled back to avoid the hassle of making it: - -#+begin_src sh -docker_tree_hash=$(git rev-parse HEAD:docker) -docker pull docker.0k.io/gogocarto-builder:${docker_tree_hash} && -docker tag docker.0k.io/gogocarto-builder:${docker_tree_hash} docker_gogocarto -#+end_src - -You still need to do (as of <2020-10-20 Tue>): -- get the latest source code -- =make up= to launch de containers with the images -- =make shell= from the latest git repos -- =make init= in the container - ** Full release is cached on =docker.0k.io/downloads= This is the content of the source tree, once populated by =make init=. @@ -55,7 +48,6 @@ As of <2020-10-23 Fri>, the =Makefile='s =init= target is: #+begin_src sh $ grep ^init Makefile -A1 init: install assets load-fixtures fix-perms ## Initialize the project - #+end_src Note that =load-fixtures= target is actually the mongodb initialization: @@ -71,7 +63,7 @@ load-fixtures: ## Create the DB schema, generate DB classes and load fixtures This will be done in the =hooks/mongo_database-relation-joined= accordingly. -*** create the full data release bundle this is what was made: +*** create the full data release bundle this is what was made #+begin_src sh ## correct bundles links (no idea if it is needed) @@ -94,6 +86,7 @@ commit_sha=$(git describe HEAD --tags) tar cjv \ bin web vendor config src templates translations \ --exclude=.git --exclude=.gitignore \ + --owner=root --group=root \ > gogocarto-${commit_sha}.tar.bz2 #+end_src @@ -103,9 +96,3 @@ all non '\*.{php,yml,js}' files. 
I noticed many many unrelated files in We need =bin= for symphony utilities that allows to setup things. -* Roadmap - -** TODO mongo / the default database name is probably not correctly set. - -Indeed, it seems it is defaulted to =gogocarto_default= and I didn't find -yet how to set it. There's a connection with SAAS mode I think. diff --git a/gogocarto/hooks/init b/gogocarto/hooks/init index 0fdc87ac..8124bd93 100755 --- a/gogocarto/hooks/init +++ b/gogocarto/hooks/init @@ -16,8 +16,6 @@ set -e -if ! [ -e "$GOGOCARTO_CODE" ]; then - gogocarto:init || exit 1 -fi +gogocarto:init || exit 1 gogocarto:config || exit 1 \ No newline at end of file diff --git a/gogocarto/hooks/mongo_database-relation-joined b/gogocarto/hooks/mongo_database-relation-joined index b7b90e72..899c8422 100755 --- a/gogocarto/hooks/mongo_database-relation-joined +++ b/gogocarto/hooks/mongo_database-relation-joined @@ -24,6 +24,8 @@ MONGODB_URL=$MONGO_URL MONGODB_DATABASE=$DBNAME ###< doctrine/mongodb-odm-bundle ### +DATABASE_NAME=$DBNAME + EOF if [ -e "$inited" ]; then diff --git a/gogocarto/hooks/publish_dir-relation-joined b/gogocarto/hooks/publish_dir-relation-joined index 4c9af2b6..1e8aa92e 100755 --- a/gogocarto/hooks/publish_dir-relation-joined +++ b/gogocarto/hooks/publish_dir-relation-joined @@ -49,6 +49,7 @@ cat < "${GOGOCARTO_CODE}"/web/.htaccess Options -MultiViews RewriteEngine On +RewriteRule ^js/.* - [L] RewriteCond %{REQUEST_FILENAME} !-f RewriteRule ^(.*)$ index.php/\$1 [QSA,L] @@ -59,8 +60,10 @@ config-add " services: $MASTER_TARGET_SERVICE_NAME: volumes: - - $SERVICE_CONFIGSTORE/opt/apps/gogocarto:/opt/apps/gogocarto:rw - - $SERVICE_DATASTORE/var/cache/gogocarto:/opt/apps/gogocarto/var/cache:rw - - $SERVICE_DATASTORE/var/lib/gogocarto/sessions:/opt/apps/gogocarto/var/sessions:rw - - $SERVICE_DATASTORE/var/log/gogocarto:/opt/apps/gogocarto/var/log:rw + - $GOGOCARTO_CODE:$GOGOCARTO_DIR:rw + - $SERVICE_DATASTORE/var/cache/gogocarto:$GOGOCARTO_DIR/var/cache:rw + - 
$SERVICE_DATASTORE/var/lib/gogocarto/sessions:$GOGOCARTO_DIR/var/sessions:rw + - $SERVICE_DATASTORE/var/log/gogocarto:$GOGOCARTO_DIR/var/log:rw + ## Required to give PHP access to this dir + - $upload_dir:$GOGOCARTO_DIR/web/uploads:rw " diff --git a/gogocarto/hooks/schedule_commands-relation-joined b/gogocarto/hooks/schedule_commands-relation-joined new file mode 100755 index 00000000..3dad3c9f --- /dev/null +++ b/gogocarto/hooks/schedule_commands-relation-joined @@ -0,0 +1,44 @@ +#!/bin/bash + +## When writing relation script, remember: +## - they should be idempotents +## - they can be launched while the dockers is already up +## - they are launched from the host +## - the target of the link is launched first, and get a chance to ``relation-set`` +## - both side of the scripts get to use ``relation-get``. + +. lib/common + +set -e + +## XXXvlab: should use container name here so that it could support +## multiple postgres +label=${SERVICE_NAME} +DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label + +## XXXvlab: Should we do a 'docker exec' instead ? +bin_console="dc run -u www-data --rm --entrypoint \\\"$GOGOCARTO_DIR/bin/console\\\" $MASTER_BASE_SERVICE_NAME" + +## Warning: 'docker -v' will use HOST directory even if launched from +## 'cron' container. 
+file_put "$DST" <&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkvote_script.log + +@daily root lock ${label}-checkExternalSourceToUpdate -D -p 10 -c "\ + $bin_console app:elements:checkExternalSourceToUpdate" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkExternalSourceToUpdate_script.log + +@daily root lock ${label}-notify-moderation -D -p 10 -c "\ + $bin_console app:notify-moderation" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-notify-moderation_script.log + + +@hourly root lock ${label}-sendNewsletter -D -p 10 -c "\ + $bin_console app:users:sendNewsletter" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-sendNewsletter_script.log + + +*/5 * * * * root lock ${label}-webhooks-post -D -p 10 -c "\ + $bin_console --env=prod app:webhooks:post" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-webhooks-post_script.log + + +EOF +chmod +x "$DST" diff --git a/gogocarto/lib/common b/gogocarto/lib/common index 975b49d6..ff983092 100644 --- a/gogocarto/lib/common +++ b/gogocarto/lib/common @@ -1,15 +1,45 @@ # -*- mode: shell-script -*- - -GOGOCARTO_CODE="$SERVICE_CONFIGSTORE/opt/apps/gogocarto" -GOGOCARTO_RELEASE=3.1.3-2-gf3c10f1 +GOGOCARTO_DIR="/opt/apps/gogocarto" +GOGOCARTO_CODE="$SERVICE_CONFIGSTORE$GOGOCARTO_DIR" +GOGOCARTO_RELEASE=3.1.3-56-g6b8ba361 GOGOCARTO_URL=https://docker.0k.io/downloads/gogocarto-"${GOGOCARTO_RELEASE}".tar.bz2 gogocarto:init() { - mkdir -p "${GOGOCARTO_CODE}" && - cd "${GOGOCARTO_CODE}" && - curl "$GOGOCARTO_URL" | tar xjv + current_version="" + if [ -d "${GOGOCARTO_CODE}" ]; then + current_version=$(cat "${GOGOCARTO_CODE}"/.version) || { + err "Couldn't find ${GOGOCARTO_CODE}/.version file." + echo " Your config dir is in a broken state." >&2 + return 1 + } + else + mkdir -p "${GOGOCARTO_CODE}" && + cd "${GOGOCARTO_CODE}" && + git init . 
&& + git config user.email "root@localhost" && + git config user.name "Root" || { + err "Couldn't create directory ${GOGOCARTO_CODE}, or init it with git." + return 1 + } + fi + if [ "$current_version" != "$GOGOCARTO_RELEASE" ]; then + cd "${GOGOCARTO_CODE}" || return 1 + if [ -d "$PWD"/.git ]; then + rm -rf "$PWD"/* "$PWD"/{.version,.inited-*,.env} || return 1 + else + err "Can't find the '.git' directory in ${GOGOCARTO_CODE}." + return 1 + fi + curl -L "$GOGOCARTO_URL" | tar xjv || { + err "Couldn't download $GOGOCARTO_URL." + return 1 + } + echo "$GOGOCARTO_RELEASE" > .version + git add -A . && + git commit -m "Release $GOGOCARTO_RELEASE" + fi } @@ -21,7 +51,7 @@ gogocarto:config() { cat < "${GOGOCARTO_CODE}"/.env ###> symfony/framework-bundle ### -APP_ENV=$APP_ENV +APP_ENV=${APP_ENV} APP_SECRET=82ec369b81caab5446ddfc3b5edb4d00 CSRF_PROTECTION=$( [ "$APP_ENV" == "prod" ] && diff --git a/gogocarto/metadata.yml b/gogocarto/metadata.yml index 47486f34..05b609bb 100644 --- a/gogocarto/metadata.yml +++ b/gogocarto/metadata.yml @@ -26,3 +26,8 @@ uses: auto: summon solves: database: "main storage" + schedule-commands: + constraint: recommended + auto: pair + solves: + maintenance: "Production scheduled tasks" diff --git a/hedgedoc/hooks/init b/hedgedoc/hooks/init new file mode 100755 index 00000000..eb89fbe0 --- /dev/null +++ b/hedgedoc/hooks/init @@ -0,0 +1,12 @@ +#!/bin/bash + +init-config-add "\ +$SERVICE_NAME: + environment: + CMD_USECDN: \"false\" +" + +## ``codimd`` create uploads folder with wrong permission +uid=$(docker_get_uid "$SERVICE_NAME" "hedgedoc") +mkdir -p "$SERVICE_DATASTORE/hedgedoc/public/uploads" +chown "$uid" "$SERVICE_DATASTORE/hedgedoc/public/uploads" diff --git a/codimd/hooks/postgres_database-relation-joined b/hedgedoc/hooks/postgres_database-relation-joined similarity index 100% rename from codimd/hooks/postgres_database-relation-joined rename to hedgedoc/hooks/postgres_database-relation-joined diff --git 
a/hedgedoc/hooks/web_proxy-relation-joined b/hedgedoc/hooks/web_proxy-relation-joined new file mode 100755 index 00000000..efbbd1fa --- /dev/null +++ b/hedgedoc/hooks/web_proxy-relation-joined @@ -0,0 +1,22 @@ +#!/bin/bash + +set -e + +DOMAIN=$(relation-get domain) || exit 1 + +## These are mainly to setup the correct web-hook +if [ "$MASTER_BASE_SERVICE_NAME" == "$DOMAIN" ]; then + ## This is because the IP will be the docker container version + USESSL="" +else + USESSL="CMD_PROTOCOL_USESSL: 'true'" +fi + +config-add "\ +services: + $MASTER_BASE_SERVICE_NAME: + environment: + CMD_DOMAIN: $DOMAIN + $USESSL +" + diff --git a/codimd/metadata.yml b/hedgedoc/metadata.yml similarity index 89% rename from codimd/metadata.yml rename to hedgedoc/metadata.yml index 7e4d0e68..d17ee1e2 100644 --- a/codimd/metadata.yml +++ b/hedgedoc/metadata.yml @@ -1,6 +1,6 @@ -docker-image: docker.0k.io/hackmd:2.2.0 ## from: nabo.codimd.dev/hackmdio/hackmd:2.2.0 +docker-image: docker.0k.io/hedgedoc:1.7.2 ## from: quay.io/hedgedoc/hedgedoc:1.7.2-alpine data-resources: - - /home/hackmd/app/public/uploads + - /hedgedoc/public/uploads default-options: diff --git a/logrotate/build/src/entrypoint.sh b/logrotate/build/src/entrypoint.sh index 070398d3..453d12b4 100755 --- a/logrotate/build/src/entrypoint.sh +++ b/logrotate/build/src/entrypoint.sh @@ -10,4 +10,4 @@ do done >> status.clean mv status.clean status -/usr/sbin/logrotate -s /var/lib/logrotate/status /etc/logrotate.conf +/usr/sbin/logrotate -v -s /var/lib/logrotate/status /etc/logrotate.conf diff --git a/mariadb/build/Dockerfile b/mariadb/build/Dockerfile new file mode 100644 index 00000000..5d957d44 --- /dev/null +++ b/mariadb/build/Dockerfile @@ -0,0 +1,19 @@ +FROM alpine:3.9 + +RUN apk add --no-cache mariadb mariadb-client mariadb-server-utils && \ + rm -f /var/cache/apk/* + +## Required by mysql-backup +RUN apk add --no-cache bash gzip && \ + rm -f /var/cache/apk/* + +RUN mkdir -p /run/mysqld && \ + chown -R mysql:mysql /run/mysqld + 
+RUN sed -i "s|.*bind-address\s*=.*|bind-address=0.0.0.0|g" /etc/my.cnf.d/mariadb-server.cnf + +COPY src/ / + +EXPOSE 3306 + +ENTRYPOINT ["/entrypoint.sh"] diff --git a/mariadb/build/src/entrypoint.sh b/mariadb/build/src/entrypoint.sh new file mode 100755 index 00000000..ae31f706 --- /dev/null +++ b/mariadb/build/src/entrypoint.sh @@ -0,0 +1,20 @@ +#!/bin/sh + + + +if ! [ -d /var/lib/mysql/mysql ]; then + chown -R mysql:mysql /var/lib/mysql + mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null +fi + + + +## Support of Ctrl-C: see https://github.com/docker-library/mysql/issues/47 +run() { + "$@" & + pid="$!" + trap "kill -SIGQUIT $pid" INT TERM + wait +} + +run mysqld --user=mysql --skip-name-resolve --skip-networking=0 "$@" \ No newline at end of file diff --git a/mariadb/hooks/init b/mariadb/hooks/init index 2e8a84c2..2c64b8dc 100755 --- a/mariadb/hooks/init +++ b/mariadb/hooks/init @@ -25,31 +25,7 @@ fi if ! [ -d "$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR" ]; then MYSQL_ROOT_PASSWORD="$(gen_password)" - - debug docker run -e "MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD" \ - --rm \ - -v "$DATA_DIR:$DB_DATADIR" \ - --entrypoint /bin/bash "$DOCKER_BASE_IMAGE" - docker run -e "MYSQL_ROOT_PASSWORD=$MYSQL_ROOT_PASSWORD" \ - --rm \ - -v "$DATA_DIR:$DB_DATADIR" \ - --entrypoint /bin/bash "$DOCKER_BASE_IMAGE" -c ' - mysqld() { - echo "diverted mysqld call..." >&2; - echo "$*" | grep -E "(--help|--skip-networking)" >/dev/null 2>&1 || return; - echo " .. Allowing call." >&2; - /usr/sbin/mysqld "$@"; - } - export -f mysqld; - /docker-entrypoint.sh mysqld' || true - ## docker errorlevel is still 0 even if it failed. - ## AND we must ignore mysqld error ! - [ "$(find "$DATA_DIR" \ - -maxdepth 0 -type d -empty 2>/dev/null)" ] && { - err "Docker run probably failed to do it's job." - exit 1 - } - + mkdir -p "${HOST_DB_PASSFILE%/*}" ## XXXvlab: this won't help support multiple project running on the ## same host cat < "$HOST_DB_PASSFILE" @@ -57,5 +33,27 @@ if ! 
[ -d "$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR" ]; then password=$MYSQL_ROOT_PASSWORD EOF chmod 600 "$HOST_DB_PASSFILE" + + ## deactivating final connection check + ddb () { true; } + export -f ddb + ensure_db_docker_running || exit 1 + + docker exec -i "$_DB_NAME" mysql <&1 >/dev/null) || { + err "Docker run probably failed to do it's job." + echo "$err" | prefix " " >&2 + exit 1 + } + info "New root password for mysql. " fi \ No newline at end of file diff --git a/mariadb/hooks/install.d/60-backup.sh b/mariadb/hooks/install.d/60-backup.sh new file mode 100644 index 00000000..c9d91e12 --- /dev/null +++ b/mariadb/hooks/install.d/60-backup.sh @@ -0,0 +1,102 @@ + +set -eux ## important for unbound variable ? + +## Require these to be set +# MYSQL_ROOT_PASSWORD= +# MYSQL_CONTAINER= + +[ "${MYSQL_ROOT_PASSWORD}" ] || { + echo "Error: you must set \$MYSQL_ROOT_PASSWORD prior to running this script." >&2 + exit 1 +} + +[ "${MYSQL_CONTAINER}" ] || { + echo "Error: you must set \$MYSQL_CONTAINER prior to running this script." >&2 + exit 1 +} + + +## +## Init, to setup passwordless connection to mysql +## + +type -p mysql >/dev/null || { + case $(lsb_release -is) in + Debian) + case $(lsb_release -rs) in + 10) + apt-get install -y default-mysql-client ~/.my.cnf +[client] +password=${MYSQL_ROOT_PASSWORD} +EOF + chmod 600 ~/.my.cnf +fi + +## +## installation of the mysql-backup script +## + + +apt-get install -y kal-shlib-{core,pretty,common} /etc/cron.d/mysql-backup +SHELL=/bin/bash +PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + +0 * * * * root /usr/local/sbin/mysql-backup --host \$(docker-ip "$MYSQL_CONTAINER" 2>/dev/null | sed -r 's/ +/ /g' | cut -f 3 -d " ") | logger -t mysql-backup + +EOF + + +## +## Connection with backup +## + +if type -p mirror-dir >/dev/null 2>&1; then + [ -d "/etc/mirror-dir" ] || { + echo "'mirror-dir' is installed but no '/etc/mirror-dir' was found." >&2 + exit 1 + } + depends shyaml + + if ! 
sources=$(shyaml get-values default.sources < /etc/mirror-dir/config.yml); then + echo "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'." >&2 + exit 1 + fi + + if ! echo "$sources" | grep "^/var/backups/mysql$" 2>/dev/null; then + sed -i '/sources:/a\ - /var/backups/mysql' /etc/mirror-dir/config.yml + cat <> /etc/mirror-dir/config.yml +/var/backups/mysql: + exclude: + - "/*.inprogress" +EOF + fi +else + echo "warn: 'mirror-dir' not installed, backup won't be sent" >&2 +fi + + + diff --git a/mariadb/hooks/schedule_command-relation-joined b/mariadb/hooks/schedule_command-relation-joined index 50e5e5bf..1fe8db03 100755 --- a/mariadb/hooks/schedule_command-relation-joined +++ b/mariadb/hooks/schedule_command-relation-joined @@ -33,12 +33,11 @@ COMPOSE_LAUNCHER_OPTS=$COMPOSE_LAUNCHER_OPTS $schedule root lock $label -D -p 10 -c "\ docker run --rm \ - -e MYSQLHOST="${SERVICE_NAME}" \ --network ${PROJECT_NAME}_default \ -v \"$LOCAL_DB_PASSFILE\":/root/.my.cnf \ -v \"$HOST_CHARM_STORE/${CHARM_REL_PATH#${CHARM_STORE}/}/resources/bin/mysql-backup:/usr/sbin/mysql-backup\" \ -v \"$SERVICE_DATASTORE/var/backups/mysql:/var/backups/mysql\" \ --entrypoint mysql-backup \ - \"$DOCKER_BASE_IMAGE\"" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log + \"$DOCKER_BASE_IMAGE\" --host \"${SERVICE_NAME}\"" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${label}_script.log EOF chmod +x "$DST" diff --git a/mariadb/metadata.yml b/mariadb/metadata.yml index 3c742ee1..676e4933 100644 --- a/mariadb/metadata.yml +++ b/mariadb/metadata.yml @@ -1,6 +1,4 @@ name: MariaDB -## From: mysql Ver 15.1 Distrib 10.0.21-MariaDB -docker-image: docker.0k.io/mariadb:1.0.0 maintainer: "Valentin Lab " provides: mysql-database: @@ -18,7 +16,7 @@ uses: schedule: "31 * * * *" ## schedule backup every hour ## This one is useful only if previous relation is used backup: - constraint: optional + constraint: recommended auto: pair solves: backup: "Automatic regular backups of dumps" diff --git 
a/mariadb/resources/bin/mysql-backup b/mariadb/resources/bin/mysql-backup index afe5b8fb..9bbaf98a 100755 --- a/mariadb/resources/bin/mysql-backup +++ b/mariadb/resources/bin/mysql-backup @@ -1,5 +1,34 @@ #!/bin/bash + +usage="$exname [--host HOST] [DATABASE...]" + + +DBS=() +host= +while [ "$1" ]; do + case "$1" in + "--help"|"-h") + echo "$usage" >&2 + exit 0 + ;; + "--host") + host="$2" + shift + ;; + *) + DBS+=("$1") + ;; + esac + shift +done + + +mysql_opts=() +if [ "$host" ]; then + mysql_opts+=(-h "$host") +fi + m() { mysql "${mysql_opts[@]}" -Bs "$@" } @@ -14,16 +43,13 @@ mysql_databases() { mysql_tables() { local db="$1" - echo "SHOW TABLES" | m "$db" + echo "SHOW TABLES" | m "$db" } -mysql_opts=() -if [ "$MYSQLHOST" ]; then - mysql_opts+=(-h "$MYSQLHOST") -fi - -DBS=($(mysql_databases)) || exit 1 +if [ "${#DBS[@]}" == 0 ]; then + DBS=($(mysql_databases)) || exit 1 +fi mkdir -p /var/backups/mysql @@ -38,7 +64,7 @@ for db in "${DBS[@]}"; do [ -d "$dst" ] && mv "$dst" "$dst.old" mkdir -p "$dst.inprogress" (( start = SECONDS )) - md "$db" --routines --no-data --add-drop-database --database "$db" | gzip --rsyncable > "$dst.inprogress/schema.sql.gz" + md "$db" --routines --no-data --add-drop-database --database "$db" | gzip --rsyncable > "$dst.inprogress/00-schema.sql.gz" tables=$(mysql_tables "$db") for table in $tables; do backup_file="$dst.inprogress/${table}.sql.gz" diff --git a/monujo/hooks/init b/monujo/hooks/init new file mode 100755 index 00000000..6c830f4c --- /dev/null +++ b/monujo/hooks/init @@ -0,0 +1,27 @@ +#!/bin/bash + +## Init is run on host +## For now it is run every time the script is launched, but +## it should be launched only once after build. 
+ +## Accessible variables are: +## - SERVICE_NAME Name of current service +## - DOCKER_BASE_IMAGE Base image from which this service might be built if any +## - SERVICE_DATASTORE Location on host of the DATASTORE of this service +## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service + +set -e + +APP_NAME=monujo +SOURCE_URL="https://docker.0k.io/downloads/$APP_NAME-0.0.1.tar.bz2" +LOCATION="$SERVICE_DATASTORE/opt/apps/$APP_NAME" + +mkdir -p "$LOCATION" +if dir_is_empty "$LOCATION"; then + cd "$LOCATION" + wget -q "$SOURCE_URL" -O file.tar.bz2 + tar xjf file.tar.bz2 + rm file.tar.bz2 + chown root:root "$LOCATION" -R +fi + diff --git a/monujo/metadata.yml b/monujo/metadata.yml new file mode 100644 index 00000000..d7b80287 --- /dev/null +++ b/monujo/metadata.yml @@ -0,0 +1,17 @@ +description: "LokWallet" +maintainer: "Valentin Lab " +subordinate: true + +uses: + publish-dir: + #constraint: required | recommended | optional + #auto: pair | summon | none ## default: pair + scope: container + constraint: required + auto: summon + solves: + container: "main running server" + default-options: + location: !var-expand "$DATASTORE/$BASE_SERVICE_NAME/opt/apps/monujo" + # data-dirs: ## write permission for web-app + # - . 
diff --git a/mysql/hooks/install b/mysql/hooks/install index a615b583..02262a20 100755 --- a/mysql/hooks/install +++ b/mysql/hooks/install @@ -9,11 +9,11 @@ GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""} GIT_0K_BASE=${GIT_0K_BASE:-"git.0k.io:/var/git"} -apt-get install -y --force-yes cron kal-scripts +apt-get install -y cron kal-scripts debconf-set-selections <<< "mysql-server mysql-server/root_password password $MYSQL_PASSWORD" debconf-set-selections <<< "mysql-server mysql-server/root_password_again password $MYSQL_PASSWORD" -apt-get install -y --force-yes mysql-server +apt-get install -y mysql-server diff --git a/nextcloud/build/Dockerfile b/nextcloud/build/Dockerfile deleted file mode 100644 index 445f1cd4..00000000 --- a/nextcloud/build/Dockerfile +++ /dev/null @@ -1,13 +0,0 @@ -## This is a cache of nextcloud:18.0.1 image (gmp is included) -FROM docker.0k.io/nextcloud:1.2.0 - - -## -## What is following is only to patch nextcloud to remove -## some database name checks -## - -COPY database-accept-dots.patch /tmp/ - -RUN cd /usr/src/nextcloud && \ - patch -p1 < /tmp/database-accept-dots.patch \ No newline at end of file diff --git a/nextcloud/build/database-accept-dots.patch b/nextcloud/build/database-accept-dots.patch deleted file mode 100644 index b25e9f5b..00000000 --- a/nextcloud/build/database-accept-dots.patch +++ /dev/null @@ -1,14 +0,0 @@ -diff --git a/lib/private/Setup/AbstractDatabase.php b/lib/private/Setup/AbstractDatabase.php -index 0cbfecf..a821a2e 100644 ---- a/lib/private/Setup/AbstractDatabase.php -+++ b/lib/private/Setup/AbstractDatabase.php -@@ -72,9 +72,6 @@ abstract class AbstractDatabase { - } elseif (empty($config['dbname'])) { - $errors[] = $this->trans->t("%s enter the database name.", [$this->dbprettyname]); - } -- if(substr_count($config['dbname'], '.') >= 1) { -- $errors[] = $this->trans->t("%s you may not use dots in the database name", array($this->dbprettyname)); -- } - return $errors; - } - diff --git a/nextcloud/metadata.yml 
b/nextcloud/metadata.yml index 309dc45b..f338f544 100644 --- a/nextcloud/metadata.yml +++ b/nextcloud/metadata.yml @@ -1,9 +1,9 @@ +docker-image: docker.0k.io/nextcloud:18.0.1-myc data-resources: - /var/www/html - /var/lib/nextcloud/data config-resources: - /var/www/html/config - provides: nextcloud-app: uses: diff --git a/odoo-tecnativa/actions/install b/odoo-tecnativa/actions/install new file mode 100755 index 00000000..d34de90a --- /dev/null +++ b/odoo-tecnativa/actions/install @@ -0,0 +1,50 @@ +#!/bin/bash + +## Load action gets a first argument a DIRECTORY holding the necessary files. +## +## + +if [ -z "$SERVICE_DATASTORE" ]; then + echo "This script is meant to be run through 'compose' to work properly." >&2 + exit 1 +fi + +usage="$exname [-h|--help] DBNAME [MODULE ...]" + +dbname= +modules=() +while [ "$1" ]; do + case "$1" in + "--help"|"-h") + print_usage + exit 0 + ;; + *) + [ -z "$dbname" ] && { dbname=$1 ; shift ; continue ; } + modules+=("$1") + ;; + esac + shift +done + +if [ -z "$dbname" ]; then + err "You must provide a destination database name as second argument." + print_usage + exit 1 +fi + +if [ -z "${modules[*]}" ]; then + err "You must provide at least one module as third argument." + print_usage + exit 1 +fi + +modules="$(echo "${modules[@]}" | tr " " ",")" + +## This can work only if ~/.my.cnf is correctly created by init. + +set -e + +launch_docker_compose run "$CONTAINER_NAME" --init="$modules" -d "$dbname" --stop-after-init + +info "Installed '$modules' module(s) into database '$dbname'." diff --git a/onlyoffice/hooks/init b/onlyoffice/hooks/init index 6f7a4d78..f373e2c6 100755 --- a/onlyoffice/hooks/init +++ b/onlyoffice/hooks/init @@ -13,9 +13,23 @@ set -e -if ! [ -e "$SERVICE_CONFIGSTORE/etc/onlyoffice/documentserver/local.json" ]; then +image_id=$(service_base_image_id "$SERVICE_NAME") || { + err "couldn't get image id of $SERVICE_NAME." 
+ exit 1 +} + +CONTROL_FILE="$SERVICE_CONFIGSTORE/etc/onlyoffice/.image_id" +if [ "$(cat "$CONTROL_FILE")" != "$image_id" ]; then ## first time we need to extract configuration from image + rm -rf "$SERVICE_CONFIGSTORE/etc/onlyoffice" mkdir -p "$SERVICE_CONFIGSTORE/etc/onlyoffice" service_base_image_export_dir "$SERVICE_NAME" /etc/onlyoffice/documentserver "$SERVICE_CONFIGSTORE/etc/onlyoffice/" -fi - + printf "%s" "$image_id" > "$CONTROL_FILE" +else + ## probably not needed to regenerate fonts + init-config-add "\ +$MASTER_BASE_SERVICE_NAME: + environment: + GENERATE_FONTS: \"false\" +" +fi \ No newline at end of file diff --git a/onlyoffice/hooks/nextcloud_app-relation-joined b/onlyoffice/hooks/nextcloud_app-relation-joined index d58f8542..616d7f4e 100755 --- a/onlyoffice/hooks/nextcloud_app-relation-joined +++ b/onlyoffice/hooks/nextcloud_app-relation-joined @@ -15,7 +15,8 @@ compose --no-relations --no-init \ app:install onlyoffice \; \ config:system:set onlyoffice DocumentServerInternalUrl --value="http://$MASTER_BASE_SERVICE_NAME/" \; \ config:system:set onlyoffice StorageUrl --value="http://$MASTER_TARGET_SERVICE_NAME/" \; \ - config:app:set onlyoffice jwt_secret --value="$KEY" + config:app:set onlyoffice jwt_secret --value="$KEY" \; \ + config:system:set allow_local_remote_servers --type=boolean --value=true ONLYOFFICE_CFG="$SERVICE_CONFIGSTORE/etc/onlyoffice/documentserver/local.json" diff --git a/onlyoffice/hooks/postgres_database-relation-joined b/onlyoffice/hooks/postgres_database-relation-joined new file mode 100755 index 00000000..56a93bab --- /dev/null +++ b/onlyoffice/hooks/postgres_database-relation-joined @@ -0,0 +1,34 @@ +#!/bin/bash + +. 
lib/common + +set -e + +PASSWORD="$(relation-get password)" +USER="$(relation-get user)" +DBNAME="$(relation-get dbname)" +ADMIN_PASSWORD=$(relation-base-compose-get admin-password 2>/dev/null) || { + if [ -e "$CONFIG" ]; then + ADMIN_PASSWORD=$(grep ^admin_passwd "$CONFIG" | sed -r 's/^admin_passwd\s+=\s+(.+)$/\1/g') + fi + if [ -z "$ADMIN_PASSWORD" ]; then + info "Generating odoo admin password" + ADMIN_PASSWORD=$(gen_password) + fi +} + +database=$(options-get database 2>/dev/null) || true +database="${database:-$DBNAME}" + +config-add "\ +services: + $MASTER_BASE_SERVICE_NAME: + environment: + DB_TYPE: \"postgres\" + DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\" + DB_NAME: \"$DBNAME\" + DB_PWD: \"$PASSWORD\" + DB_USER: \"$USER\" +" + +info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access." diff --git a/onlyoffice/metadata.yml b/onlyoffice/metadata.yml index 0e9d3e31..1d815cc6 100644 --- a/onlyoffice/metadata.yml +++ b/onlyoffice/metadata.yml @@ -1,7 +1,14 @@ -docker-image: docker.0k.io/oods:1.0.0 +# from: https://github.com/0k/Docker-DocumentServer (6.1.0) +docker-image: docker.0k.io/oods:2.0.0 data-resources: - /var/www/onlyoffice/Data - /var/log/onlyoffice + ## not documented but found in entrypoint and docker inspect + - /var/lib/onlyoffice + - /var/lib/postgres + - /var/lib/rabbitmq + - /var/lib/redis + - /usr/share/fonts/truetype/custom config-resources: - /etc/onlyoffice/documentserver @@ -25,3 +32,30 @@ uses: proxy: "Public access" default-options: target: !var-expand ${MASTER_BASE_SERVICE_NAME}:80 + + postgres-database: + constraint: required + auto: summon + solves: + database: "main storage" + + ## XXXvlab: this should not be necessary as official documentation + ## seems to explain that all data are either logs or caches. But we + ## had issues with onlyoffice not sending back modifications to the + ## filesystem. With some tweaks, we can sometimes gets the data from + ## application cache. 
So for now, we must include this data to + ## backup. + backup: + constraint: recommended + auto: pair + solves: + backup: "Automatic regular backups of dumps" + default-options: + ## First pattern matching wins, no pattern matching includes. + ## include-patterns are checked first, then exclude-patterns + ## Patterns rules: + ## - ending / for directory + ## - '*' authorized + ## - must start with a '/', will start from $SERVICE_DATASTORE + include-patterns: + - /var/lib/onlyoffice/ diff --git a/peertube/build/Dockerfile b/peertube/build/Dockerfile index 58f2a1cd..6dc1ff6d 100644 --- a/peertube/build/Dockerfile +++ b/peertube/build/Dockerfile @@ -1,4 +1,4 @@ -FROM alpine:3.9 AS common +FROM alpine:3.14 AS common RUN apk add gnupg ffmpeg @@ -13,11 +13,10 @@ FROM common AS builder ## Download target release ## -ENV PEERTUBE_RELEASE=v2.1.1 +ENV PEERTUBE_RELEASE=v3.3.0 RUN apk add wget -COPY ./*.patch /tmp RUN mkdir -p /opt/apps/peertube && \ cd /opt/apps/peertube && \ wget https://github.com/Chocobozzz/PeerTube/releases/download/${PEERTUBE_RELEASE}/peertube-${PEERTUBE_RELEASE}.tar.xz && \ @@ -25,12 +24,12 @@ RUN mkdir -p /opt/apps/peertube && \ rm peertube-${PEERTUBE_RELEASE}.tar.xz && \ mv peertube-${PEERTUBE_RELEASE}/* . 
&& \ rmdir peertube-${PEERTUBE_RELEASE} && \ - cat /tmp/*.patch | patch -p1 && \ mkdir -p /etc/peertube /var/lib/peertube && \ ln -sf /var/lib/peertube /opt/apps/peertube/storage -RUN apk add yarn ## Build command -RUN apk add git build-base python bash ## Build deps +RUN apk add yarn ## Build command +RUN apk add git build-base python3 bash && \ + ln -sf /usr/bin/python3 /usr/local/bin/python ## Build deps RUN chown -R peertube:peertube /opt/apps/peertube @@ -39,7 +38,6 @@ RUN apk add npm ## only needed to install things that should be in yarn USER peertube RUN cd /opt/apps/peertube && \ - npm install bcrypt && \ yarn install --production --pure-lockfile && \ yarn cache clean @@ -92,7 +90,7 @@ VOLUME /etc/peertube EXPOSE 9000 -RUN apk add nodejs-npm +RUN apk add nodejs npm ## runtime deps RUN apk add openssl diff --git a/peertube/build/dbname.patch b/peertube/build/dbname.patch deleted file mode 100644 index bb9887bf..00000000 --- a/peertube/build/dbname.patch +++ /dev/null @@ -1,26 +0,0 @@ -diff --git a/dist/server/initializers/checker-before-init.js b/dist/server/initializers/checker-before-init.js -index d8422ee..5eb3678 100644 ---- a/dist/server/initializers/checker-before-init.js -+++ b/dist/server/initializers/checker-before-init.js -@@ -16,7 +16,7 @@ function checkMissedConfig() { - const required = ['listen.port', 'listen.hostname', - 'webserver.https', 'webserver.hostname', 'webserver.port', - 'trust_proxy', -- 'database.hostname', 'database.port', 'database.suffix', 'database.username', 'database.password', 'database.pool.max', -+ 'database.hostname', 'database.port', 'database.dbname', 'database.username', 'database.password', 'database.pool.max', - 'smtp.hostname', 'smtp.port', 'smtp.username', 'smtp.password', 'smtp.tls', 'smtp.from_address', - 'email.body.signature', 'email.subject.prefix', - 'storage.avatars', 'storage.videos', 'storage.logs', 'storage.previews', 'storage.thumbnails', 'storage.torrents', 'storage.cache', -diff --git 
a/dist/server/initializers/config.js b/dist/server/initializers/config.js -index 6aa916f..89d16fe 100644 ---- a/dist/server/initializers/config.js -+++ b/dist/server/initializers/config.js -@@ -12,7 +12,7 @@ const CONFIG = { - HOSTNAME: config.get('listen.hostname') - }, - DATABASE: { -- DBNAME: 'peertube' + config.get('database.suffix'), -+ DBNAME: config.get('database.dbname'), - HOSTNAME: config.get('database.hostname'), - PORT: config.get('database.port'), - USERNAME: config.get('database.username'), diff --git a/peertube/hooks/init b/peertube/hooks/init index 2bdf594b..ddcb6914 100755 --- a/peertube/hooks/init +++ b/peertube/hooks/init @@ -66,6 +66,7 @@ for section in "${VALID_SECTION[@]}"; do done >> "$HOST_CONFIG_DIR/local.yaml" if ! [ -e "$HOST_DATA_DIR/config.json" ]; then + mkdir -p "$HOST_DATA_DIR" echo "{}" > "$HOST_DATA_DIR/config.json" fi diff --git a/peertube/hooks/postgres_database-relation-joined b/peertube/hooks/postgres_database-relation-joined index 8bcced20..f106b3b6 100755 --- a/peertube/hooks/postgres_database-relation-joined +++ b/peertube/hooks/postgres_database-relation-joined @@ -15,7 +15,7 @@ cat <> "$HOST_CONFIG_DIR/local.yaml" database: hostname: '$TARGET_SERVICE_NAME' ## We had to patch peertube to have a direct dbname (doh!) 
- dbname: '$DBNAME' + name: '$DBNAME' port: 5432 username: '$USER' password: '$PASSWORD' diff --git a/postgres/metadata.yml b/postgres/metadata.yml index ced6c328..41911440 100644 --- a/postgres/metadata.yml +++ b/postgres/metadata.yml @@ -15,7 +15,7 @@ uses: schedule: "31 * * * *" ## schedule backup every hour ## This one is useful only if previous relation is used backup: - constraint: optional + constraint: recommended auto: pair solves: backup: "Automatic regular backups of dumps" diff --git a/precise/0k-odoo-light/hooks/install b/precise/0k-odoo-light/hooks/install index eae1954e..206b804a 100755 --- a/precise/0k-odoo-light/hooks/install +++ b/precise/0k-odoo-light/hooks/install @@ -15,7 +15,7 @@ DEPS_TO_REMOVE="git" KEEP_ONLY_PO=${KEEP_ONLY_PO:-fr en de} -apt-get install -y --force-yes --no-install-recommends \ +apt-get install -y --no-install-recommends \ $DEPS $DEPS_TO_REMOVE ## XXXvlab: should use base-0k code instead ! @@ -71,7 +71,7 @@ fi RELEASE=jessie VIRTUALENV= hooks/install ) -apt-get remove -y --force-yes $DEPS_TO_REMOVE +apt-get remove -y $DEPS_TO_REMOVE apt-get autoremove -y rm -rf /opt/apps/git-sub /usr/lib/git-core/git-sub diff --git a/precise/apt-cacher/hooks/install b/precise/apt-cacher/hooks/install index f82c24ac..72634298 100755 --- a/precise/apt-cacher/hooks/install +++ b/precise/apt-cacher/hooks/install @@ -3,7 +3,7 @@ set -eux -apt-get install -y --force-yes apt-cacher-ng +apt-get install -y apt-cacher-ng ## This is needed to enable https_port diff --git a/precise/base-0k/hooks/install.d/00-base.sh b/precise/base-0k/hooks/install.d/00-base.sh index f2ae3c9c..8446daa2 100755 --- a/precise/base-0k/hooks/install.d/00-base.sh +++ b/precise/base-0k/hooks/install.d/00-base.sh @@ -2,21 +2,55 @@ set +eux + +## Certificate DST_Root_CA-X3 expired, it needs to be removed +## from list of available certificates. Debian <10 have the issue. 
+## +## Fixing: https://www.reddit.com/r/sysadmin/comments/pzags0/lets_encrypts_dst_root_ca_x3_expired_yesterday/ +## see also: https://techcrunch.com/2021/09/21/lets-encrypt-root-expiry/?guccounter=1 + +modified_certificate= +mkdir -p /usr/local/share/ca-certificates/custom +for certfile_name in isrgrootx1:ISRG_Root_X1 isrg-root-x2 lets-encrypt-r3; do + certfile=${certfile_name%%:*} + name=${certfile_name#*:} + echo "Checking $certfile for $name" + if ! [ -e "/usr/local/share/ca-certificates/custom/$certfile".crt ] && + ! [ -e "/etc/ssl/certs/$name.pem" ]; then + wget --no-check-certificate https://letsencrypt.org/certs/"$certfile".pem \ + -O "/usr/local/share/ca-certificates/custom/$certfile".crt + modified_certificate=1 + fi +done + +if grep "^mozilla/DST_Root_CA_X3.crt" /etc/ca-certificates.conf 2>/dev/null 2>&1; then + sed -ri 's%^(mozilla/DST_Root_CA_X3.crt)%!\1%g' /etc/ca-certificates.conf +fi + +if [ -n "$modified_certificate" ]; then + update-ca-certificates +fi + +## We can now do the ``apt-get update`` safely... + apt-get update -apt-get -y --force-yes install bash-completion wget bzip2 git-core \ +apt-get -y install bash-completion wget bzip2 git-core \ less tmux mosh \ sudo git vim file /etc/apt/sources.list.d/kalysto.org.list ## vlab's shell libraries @@ -18,7 +18,7 @@ deb https://deb.kalysto.org no-dist kal-alpha kal-beta kal-main EOF if ! 
type gpg >/dev/null; then - apt-get install -y --force-yes gnupg2 \ + "$backports_list" + ## Update only this repo: + apt-get update -o Dir::Etc::sourcelist="sources.list.d/backports.list" \ + -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" + fi + + ;; + esac + ;; + esac + + apt-get install fzf > /root/.bashrc ## History management -export HISTCONTROL=ignoredups -export HISTSIZE=50000 +export HISTCONTROL=ignoreboth +export HISTSIZE=500000 +export HISTIGNORE="&:[bf]g:exit:ls:history" +export HISTFILESIZE= +export HISTTIMEFORMAT="%Y-%m-%d %T " + shopt -s histappend -PROMPT_COMMAND='history -a' ## Prompt easy management @@ -54,5 +97,36 @@ function glog() { prompt 1 +PROMPT_COMMAND='history -a' ## after prompt setting as it resets it + + +## +## fzf (apt-get install fzf) and fd (apt-get install fd-find) +## + +if [ -e /usr/share/doc/fzf/examples/key-bindings.bash ]; then + . /usr/share/doc/fzf/examples/key-bindings.bash +fi + +if [ -e /usr/share/doc/fzf/examples/completion.bash ]; then + . /usr/share/doc/fzf/examples/completion.bash +fi + +#export FZF_DEFAULT_OPTS="--color 'fg:#bbccdd,fg+:#ddeeff,bg:#111820,preview-bg:#223344,border:#778899'" +export FZF_DEFAULT_COMMAND='fd --type f --hidden --follow --exclude .git' +export FZF_CTRL_T_COMMAND="$FZF_DEFAULT_COMMAND" + + EOF +cat <> /root/.bash_profile + +## XXXvlab: +## http://stackoverflow.com/questions/9652126/bashrc-profile-is-not-loaded-on-new-tmux-session-or-window-why +## Including ``.bashrc`` if it exists (tmux don't load bashrc, and bashrc +## don't load profile... so not recursive call) +if [ -f ~/.bashrc ]; then + . 
~/.bashrc +fi + +EOF diff --git a/precise/ca/hooks/install b/precise/ca/hooks/install index cdbdb9e0..69b6b7d3 100755 --- a/precise/ca/hooks/install +++ b/precise/ca/hooks/install @@ -2,7 +2,7 @@ set -eux # -x for verbose logging to juju debug-log -apt-get install -y --force-yes kal-manage ## this is for ``mkcrt`` +apt-get install -y kal-manage ## this is for ``mkcrt`` CA_SUBJECT=${CA_SUBJECT:-/C=FR/ST=France/O=Kalysto/CN=kal.fr/emailAddress=ca@kal.fr} diff --git a/precise/git/hooks/install b/precise/git/hooks/install index 0c6f1157..7cbebdb4 100755 --- a/precise/git/hooks/install +++ b/precise/git/hooks/install @@ -9,7 +9,7 @@ GIT_0K_BASE=${GIT_0K_BASE:-"0k-ro:/var/git"} GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""} -apt-get install -y --force-yes kal-shlib-common +apt-get install -y kal-shlib-common apt-get install -y bzr diff --git a/precise/host/hooks/install.d/38-ntp.sh b/precise/host/hooks/install.d/38-ntp.sh new file mode 100755 index 00000000..a9eea567 --- /dev/null +++ b/precise/host/hooks/install.d/38-ntp.sh @@ -0,0 +1,4 @@ +#!/bin/bash + +apt-get install ntp -y /etc/default/lxc-net +USE_LXC_BRIDGE="true" + +# If you change the LXC_BRIDGE to something other than lxcbr0, then +# you will also need to update your /etc/lxc/default.conf as well as the +# configuration (/var/lib/lxc//config) for any containers +# already created using the default config to reflect the new bridge +# name. +# If you have the dnsmasq daemon installed, you'll also have to update +# /etc/dnsmasq.d/lxc and restart the system wide dnsmasq daemon. +LXC_BRIDGE="lxcbr0" +LXC_ADDR="172.101.0.1" +LXC_NETMASK="255.255.255.0" +LXC_NETWORK="172.101.0.0/24" +LXC_DHCP_RANGE="172.101.0.2,172.101.0.254" +LXC_DHCP_MAX="253" +# Uncomment the next line if you'd like to use a conf-file for the lxcbr0 +# dnsmasq. For instance, you can use 'dhcp-host=mail1,172.46.0.100' to have +# container 'mail1' always get ip address 172.46.0.100. 
+LXC_DHCP_CONFILE=/etc/lxc/dnsmasq.conf + +# Uncomment the next line if you want lxcbr0's dnsmasq to resolve the .lxc +# domain. You can then add "server=/lxc/172.46.0.1' (or your actual ) +# to /etc/dnsmasq.conf, after which 'container1.lxc' will resolve on your +# host. +#LXC_DOMAIN="lxc" + +EOF + } + + service lxc-net restart +fi \ No newline at end of file diff --git a/precise/host/hooks/install.d/60-docker.sh b/precise/host/hooks/install.d/60-docker.sh index e47419d5..5cfbd872 100755 --- a/precise/host/hooks/install.d/60-docker.sh +++ b/precise/host/hooks/install.d/60-docker.sh @@ -1,26 +1,32 @@ #!/bin/bash -need_restart= + +just_installed= if ! type -p docker; then echo "Installing docker..." - curl -sSL https://get.docker.io | sh -fi + type -p curl >dev/null || + apt-get install -y curl /dev/null; then - sed -ri 's/^(ExecStart=.*)$/\1 --disable-legacy-registry=false/g' /lib/systemd/system/docker.service - need_restart=true -fi +if [ -n "$just_installed" ]; then + need_restart= + docker_version=17 + if ! 
[[ "$(docker --version)" == "Docker version $docker_version"* ]]; then + version="$(apt-cache madison docker-ce | + cut -f 2 -d \| | + grep "$docker_version" | + head -n 1 | xargs echo)" + ## DOWNGRADE to 17.xx because 18.xx do not support registry v1 + apt-get install -y --allow-downgrades docker-ce="$version" + need_restart=true + fi -if [ "$need_restart" ]; then - systemctl daemon-reload && - service docker restart + if [ -n "$need_restart" ] && [ -z "$NO_DOCKER_RESTART" ]; then + systemctl daemon-reload && + service docker restart + fi fi \ No newline at end of file diff --git a/precise/host/hooks/install.d/61-mirror-dir.sh b/precise/host/hooks/install.d/61-mirror-dir.sh new file mode 120000 index 00000000..b3561dd4 --- /dev/null +++ b/precise/host/hooks/install.d/61-mirror-dir.sh @@ -0,0 +1 @@ +../../../../rsync-backup/hooks/install.d/60-install.sh \ No newline at end of file diff --git a/precise/host/hooks/install.d/70-0k.sh b/precise/host/hooks/install.d/70-0k.sh index 1be350a9..f4aabbdf 100755 --- a/precise/host/hooks/install.d/70-0k.sh +++ b/precise/host/hooks/install.d/70-0k.sh @@ -8,6 +8,7 @@ GIT_0K_BASE=${GIT_0K_BASE:-"0k-ro:/var/git"} ## 0k git remote options GIT_0K_CLONE_OPTIONS=${GIT_0K_CLONE_OPTIONS:-""} +NO_DOCKER_RESTART=${NO_DOCKER_RESTART:-} ## ## Install 0k-manage @@ -25,7 +26,11 @@ mkdir -p /opt/apps git checkout 0k/prod/master fi - pip install sact.epoch || exit 1 + ## Debian 9 did not have setuptool + if [ "$(python -c 'import setuptools' 2>&1 | tail -n 1)" == "ImportError: No module named setuptools" ]; then + pip install setuptools + fi + pip install sact.epoch if [ "$(python -c 'import sact.epoch' 2>&1 | tail -n 1)" == "ImportError: No module named interface" ]; then echo "Error: conflicting installation of zope.interface detected. Trying workaround." 
( @@ -41,8 +46,8 @@ mkdir -p /opt/apps exit 1 fi fi + ln -sf /opt/apps/0k-manage/src/bin/pick2del_backups /usr/local/bin/ # ln -sf /opt/apps/0k-manage/src/bin/* /usr/local/bin/ - ) @@ -51,7 +56,7 @@ mkdir -p /opt/apps ## if [ -f /etc/compose/local.conf ]; then - sed -ri 's%^(. /opt/venv/docker-compose/bin/activate)$%# \1 ## docker-compsoe not needed anymore%g' \ + sed -ri 's%^(. /opt/venv/docker-compose/bin/activate)$%# \1 ## docker-compose not needed anymore%g' \ /etc/compose/local.conf fi @@ -61,7 +66,7 @@ fi ## ( - apt-get install -y kal-shlib-charm kal-shlib-cache kal-shlib-cmdline /etc/default/datastore DATASTORE=/srv/datastore -SNAPSHOT_BACKUP=/var/backups/snapshot EOF cat < /etc/default/compose @@ -243,7 +248,7 @@ export CONFIGSTORE=\$DOCKER_DATASTORE/config EOF -if ! egrep "^DEFAULT_COMPOSE_FILE=/etc/compose/compose.yml$" /etc/compose/local.conf >/dev/null 2>&1; then +if ! egrep "^DEFAULT_COMPOSE_FILE=" /etc/compose/local.conf >/dev/null 2>&1; then mkdir /etc/compose -p touch /etc/compose/local.conf echo "DEFAULT_COMPOSE_FILE=/etc/compose/compose.yml" >> /etc/compose/local.conf @@ -268,6 +273,6 @@ fi ln -sfnv /opt/apps/0k-pgm/bin/* /usr/local/bin/ find -L /usr/local/bin -maxdepth 1 -type l -ilname /opt/apps/0k-pgm/bin/\* -delete - apt-get install -y --force-yes pv buffer < /dev/null + apt-get install -y pv buffer < /dev/null apt-get install -y postgresql-client > /etc/default/lxc sed -ri "s%10\.0\.3\.%$LXC_NETWORK.%g;s%^#LXC_DHCP_CONFILE=%LXC_DHCP_CONFILE=%g" /etc/default/lxc-net @@ -23,6 +23,7 @@ HOST_IP=$(. 
/etc/default/lxc && ifip "$HOST_EXTERNAL_DEVICE") echo " server=$LXC_ADDR interface=lo +bind-interfaces no-negcache log-queries log-facility=/var/log/dnsmasq.log @@ -30,9 +31,11 @@ log-facility=/var/log/dnsmasq.log echo " server=${HOST_IP} +bind-interfaces log-queries no-negcache log-facility=/var/log/lxc-dnsmasq.log +no-resolv " >> /etc/lxc/dnsmasq.conf ( @@ -44,16 +47,18 @@ log-facility=/var/log/lxc-dnsmasq.log mkdir /var/log/named -p && chown bind:bind /var/log/named -/etc/init.d/bind9 restart -/etc/init.d/dnsmasq restart +/etc/init.d/bind9 stop +/etc/init.d/dnsmasq stop + service lxc restart service lxc-net restart ## had to 'brctl delbr lxcbr0' myself +/etc/init.d/dnsmasq start +/etc/init.d/bind9 start + cp /etc/resolv.conf{,.orig} cat < /etc/resolv.conf nameserver 127.0.0.1 -#domain . ## didn't work on 12.04 -search localdomain ## imperfect, we don't want to search www.localdomain EOF ## @@ -69,7 +74,7 @@ cat < /etc/logrotate.d/dnsmasq compress postrotate - kill -s SIGUSR2 "\$(cat /var/run/dnsmasq/dnsmasq.pid)" + /bin/kill -s SIGUSR2 "\$(cat /var/run/dnsmasq/dnsmasq.pid)" endscript } @@ -85,7 +90,7 @@ cat < /etc/logrotate.d/lxc-dnsmasq compress postrotate - kill -s SIGUSR2 "\$(cat /var/run/lxc/dnsmasq.pid)" + /bin/kill -s SIGUSR2 "\$(cat /var/run/lxc/dnsmasq.pid)" endscript } diff --git a/precise/host/hooks/install.d/90-shorewall.sh b/precise/host/hooks/install.d/90-shorewall.sh index 9a74a563..bd166d32 100755 --- a/precise/host/hooks/install.d/90-shorewall.sh +++ b/precise/host/hooks/install.d/90-shorewall.sh @@ -8,6 +8,8 @@ ## Install ## +HOST_EXTERNAL_DEVICE=${HOST_EXTERNAL_DEVICE:-eth0} + version_gt() { test "$(echo "$@" | tr " " "\n" | sort -V | head -n 1)" != "$1"; } shorewall_candidate_version=$(echo $(apt-cache policy shorewall | grep "Candidate:" | cut -f 2 -d :)) @@ -29,23 +31,179 @@ else } fi +case $(lsb_release -is) in + Debian) + case $(lsb_release -rs) in + 10) + ## we had trouble with ``nft`` shorewall + update-alternatives --set iptables 
/usr/sbin/iptables-legacy + ;; + esac + ;; +esac + + + +apt-get install -y dnsutils /etc/shorewall/README +Important notes gathered through time: + + +# Shorewall duties on our host + +- block any access from outside to local ports if not mentionned + explicitely in shorewall. + +- connect external ports to LXC (dockers has its own means) + - This uses ``/var/lib/lxc/*/shorewall`` files + +- let mosh connect correctly + +- ensure a correct access from Host/LXC/Docker to server's services. + For instance, an Host/LXC/Docker should be able to as if it was + external: ``curl https://myhostwebsite``. This is called routeback + and requires some special rules. + + +# Shorewall restarting and cache + +Some process in shorewall seems to be using cache in some ways in +recent version that implies that it won't take actions if files are +not changed. A simple 'touch FILE' seems to be enough. Notice the +'Compiling' lines appearing in ``shorewall restart``. + +It's always good to double-check in ``iptables -nL`` that some rules +actually seem to match your intention. + +Don't forget that ``iptables-save`` is probably the best way to get +the full rules printed on stdout. + + +# Debian, ovh kernels and iptables-nft + +Starting from Debian10, iptables by default uses iptables-nft... which +works well with default debian kernel. OVH kernels DO NOT provide +necessary kernel and we must: + + update-alternatives --set iptables /usr/sbin/iptables-legacy + +Note that transition is a little tricky because BOTH ways can have +their tables simultaneously. Use ``iptables-nft -nL`` and +``iptables-legacy -nL`` to check. + +For now, we had little success to properly have the ``nft`` version +working properly on debian kernel. So even on debian kernel, we switch +here to iptables-legacy if on debian system. 
+ + +# Interaction with docker's iptables rules + +This is configured in ``shorewall.conf``, thanks to a simple:: + + DOCKER=Yes + + +# Route back + +Be sure to check in /var/lib/lxc/*/shorewall definitions, they +must include special stances (see in next section). + +On the side of shorewall, all network interface should be declared in +``/etc/shorewall/interfaces``. + + +# lxc ``shorewall`` files + +Prefer the usage of ``ports`` files. If you insist on having a better +control of rules per LXC, you can use ``shorewall`` files. + +They should be located in /var/lib/lxc/*/shorewall. This is a standard +redirection from external host port 10022 to lxc's port 22, on port +tcp:: + + DNAT net lan:%%IP%%:22 tcp 10022 + #DNAT net lan:%%IP%%:22 udp 10022 + +Routeback (access of the same service from Host/LXC/Docker on the external +address) is given by these additional rules:: + + DNAT lan lan:www:80 tcp 80 - %%HOST_INTERNET_IP%% + DNAT lan lan:www:443 tcp 443 - %%HOST_INTERNET_IP%% + + DNAT fw lan:www:80 tcp 80 - %%HOST_INTERNET_IP%% + DNAT fw lan:www:443 tcp 443 - %%HOST_INTERNET_IP%% + + +# lxc ``ports`` files + +They should be located in /var/lib/lxc/*/ports. This is a standard +redirection from external host port 10022 to lxc's port 22, on both +tcp and udp:: + + 10022:22 ## Normal port + # 10023:23 ## This is commented ! + +Note that comments are supported also. 
+ + +EOF + + + cat < /etc/shorewall/zones fw firewall net ipv4 lan ipv4 EOF +cat < /etc/shorewall/macro.Mosh +####################################################################################################### +# DO NOT REMOVE THE FOLLOWING LINE +############################################################################################################################################################## +#ACTION SOURCE DEST PROTO DPORT SPORT ORIGDEST RATE USER MARK CONNLIMITTIME HEADERS SWITCH HELPER +# + +PARAM - - udp 60000:61000 +EOF + + + cat < /etc/shorewall/interfaces #ZONE INTERFACE BROADCAST OPTIONS -net eth0 +net $HOST_EXTERNAL_DEVICE ## Uncomment to enable vpn setup #vpn tun0 detect + + +## All interfaces that require route back should be listed +## here: lan lxcbr0 - routeback + +BEGIN SHELL + +ifconfig=\$(ifconfig) + +echo "BEGIN DOCKER adding networks rules:" >&2 +for docker_net in \$(docker network list -f driver=bridge -q); do + gws=\$(docker network inspect "\$docker_net" --format "{{range .IPAM.Config}}{{.Gateway}}{{\"\n\"}}{{end}}") || continue + for gw in \$gws; do + if=\$(printf "%s" "\$ifconfig" | egrep "\$gw" -B 1 | head -n 1 | cut -f 1 -d " ") + echo " lan \$if - routeback" >&2 + echo "lan \$if - routeback" + done +done +echo "END DOCKER" >&2 + +true + +END SHELL + EOF cat < /etc/shorewall/policy @@ -61,16 +219,31 @@ cat < /etc/shorewall/rules SSH/ACCEPT net fw Ping/ACCEPT net fw +Mosh(ACCEPT) net fw BEGIN SHELL -host_ip="\$(/sbin/ifconfig eth0 2> /dev/null | sed "s/^.*inet ad\+r://g" | grep ^[0-9] | sed "s/ .*$//g")" +host_ip="\$(/sbin/ifconfig $HOST_EXTERNAL_DEVICE 2> /dev/null | sed "s/^.*inet //g" | grep ^[0-9] | sed "s/ .*$//g")" for name in \$(lxc-ls-running); do ip=\$(dig +short A "\$name") [ -e "/var/lib/lxc/\$name/shorewall" ] && - cat /var/lib/lxc/\$name/shorewall | sed -r "s/%%HOST_INTERNET_IP%%/\$host_ip/g" \ - | sed -r "s/%%IP%%/\$ip/g" + cat /var/lib/lxc/\$name/shorewall | + sed -r "s/%%HOST_INTERNET_IP%%/\$host_ip/g" | + sed 
-r "s/%%IP%%/\$ip/g" + + if [ -e "/var/lib/lxc/\$name/ports" ]; then + for ports in \$(cat /var/lib/lxc/\$name/ports | sed -r 's/#.*\$//g'); do + lxc_port=\${ports#*:} + ext_port=\${ports%:*} + echo "LXC \$name: redirection from \$host_ip:\$ext_port -> \$ip:\$lxc_port" >&2 + for proto in tcp udp; do + for zone in net lan fw; do + echo "DNAT \$zone lan:\$ip:\$lxc_port \$proto \$ext_port - \$host_ip" + done + done + done + fi done @@ -81,7 +254,7 @@ END SHELL EOF cat < /etc/shorewall/masq -eth0 lxcbr0 +$HOST_EXTERNAL_DEVICE lxcbr0 EOF cat < /etc/shorewall/start @@ -90,12 +263,19 @@ cat < /etc/shorewall/start . /etc/default/lxc -if [ -d "/sys/class/net/\$LXC_BRIDGE" -a "\$(cat /sys/class/net/\$LXC_BRIDGE/operstate)" == "up" ]; then - source_file=/etc/init/lxc-net.conf - code=\$(egrep '^\s+iptables.*\s+-j\s+' /etc/init/lxc-net.conf | grep -v '\-D' | sed -r 's/^\s+[^-]+/run_iptables /g') - echo "Adding LXC rules:" - echo "\$code" - eval "\$code" +if [ -d "/sys/class/net/\$LXC_BRIDGE" ] && [ "\$(cat /sys/class/net/\$LXC_BRIDGE/operstate)" = "up" ]; then + source_file= + if [ -e /etc/init/lxc-net.conf ]; then + source_file=/etc/init/lxc-net.conf + elif [ -e /usr/lib/x86_64-linux-gnu/lxc/lxc-net ]; then + source_file=/usr/lib/x86_64-linux-gnu/lxc/lxc-net + fi + if [ "\$source_file" ]; then + code=\$(egrep '^\s+iptables.*\s+-j\s+' \$source_file | grep -v '\-D' | sed -r 's/^\s+[^-]+/run_iptables /g') + echo "Adding LXC rules:" + echo "\$code" + eval "\$code" + fi fi EOF @@ -112,9 +292,26 @@ EOF apt-get install -y moreutils ## needed because ``ts`` is used in this script ln -sf /opt/apps/lxc-scripts/etc/cron.d/lxc-shorewall-repair /etc/cron.d/lxc-shorewall-repair +cat < /etc/logrotate.d/lxc-shorewall-repair +/var/log/lxc-shorewall-repair.log { + weekly + missingok + dateext + dateyesterday + dateformat _%Y-%m-%d + extension .log + rotate 52 + compress + delaycompress + notifempty + create 640 root root + sharedscripts +} +EOF + ## -## Logs +## LOGS ## mkdir -p 
/var/log/shorewall @@ -124,25 +321,32 @@ chmod g+w /var/log/shorewall cat < /etc/rsyslog.d/shorewall.conf :msg, contains, "Shorewall:" /var/log/shorewall/main.log & ~ -EOF -cat < /etc/logrotate.d/shorewall -/var/log/shorewall/init.log { - weekly - rotate 4 - compress - missingok - create 0640 root adm +if \$msg contains 'net-fw DROP IN=' then { + action(type="omfile" file="/var/log/shorewall/net-fw.log") + stop } + +EOF + +cat < /etc/logrotate.d/shorewall +/var/log/shorewall/init.log +/var/log/shorewall/net-fw.log /var/log/shorewall/main.log { - rotate 7 weekly missingok - notifempty + dateext + dateyesterday + dateformat _%Y-%m-%d + extension .log + rotate 52 compress delaycompress + notifempty + create 640 root root + sharedscripts postrotate reload rsyslog >/dev/null 2>&1 || true endscript @@ -157,9 +361,12 @@ service rsyslog restart ## -## +## Final settings ## ## Activate support for docker sed -ri 's/^DOCKER=No$/DOCKER=Yes/g' /etc/shorewall/shorewall.conf + + +sed -ri 's/^IP_FORWARDING=Keep$/IP_FORWARDING=On/g' /etc/shorewall/shorewall.conf diff --git a/precise/host/hooks/install.d/95-checks.sh b/precise/host/hooks/install.d/95-checks.sh index e1918e6a..9830af36 100755 --- a/precise/host/hooks/install.d/95-checks.sh +++ b/precise/host/hooks/install.d/95-checks.sh @@ -2,4 +2,39 @@ ## REQUIRES: 0k-manage mail -ln -sf /opt/apps/0k-manage/src/etc/cron.hourly/check-* /etc/cron.hourly/ +[ -n "${BACKUP_SERVER}" ] || { + echo "Error: you must set \$BACKUP_SERVER prior to running this script." >&2 + exit 1 +} + +if ! [ -e "/etc/default/alerting" ]; then + if [ -z "$DEFAULT_ALERTING_EMAILS" ]; then + echo "You must define \$DEFAULT_ALERTING_EMAILS before launching this script." >&2 + exit 1 + fi + cat < /etc/default/alerting +MAIL_DESTS=( +$( +for email in $DEFAULT_ALERTING_EMAILS; do +echo " $email" +done +) +) +EOF +fi + +ln -sfv /opt/apps/0k-manage/src/etc/cron.hourly/check-* /etc/cron.hourly/ + +if ! 
[ -e /usr/local/sbin/mirror-dir ]; then + ln -sfv /opt/apps/0k-charms/rsync-backup/resources/bin/mirror-dir /usr/local/sbin/ +fi + +mailname=$(cat /etc/mailname) +mailname=${mailname%.localdomain} +cat < /etc/cron.d/mirror-dir-check +SHELL=/bin/bash +PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + +35 * * * * root mirror-dir check -d "$BACKUP_SERVER:10023" -n '12 hours' | logger -t mirror-dir-check + +EOF \ No newline at end of file diff --git a/precise/host/hooks/install.d/96-backup-lxc.sh b/precise/host/hooks/install.d/96-backup-lxc.sh index ebf4908b..b2dc2336 100755 --- a/precise/host/hooks/install.d/96-backup-lxc.sh +++ b/precise/host/hooks/install.d/96-backup-lxc.sh @@ -6,5 +6,75 @@ ## Backup lxc ## -ln -sf /opt/apps/lxc-scripts/etc/cron.hourly/* /etc/cron.hourly/ -ln -sf /opt/apps/lxc-scripts/etc/cron.daily/* /etc/cron.daily/ +ln -sfv /opt/apps/lxc-scripts/etc/cron.hourly/* /etc/cron.hourly/ +ln -sfv /opt/apps/lxc-scripts/etc/cron.daily/* /etc/cron.daily/ + + +if ! grep ^BACKUP_LXC_PATH= /etc/default/lxc >/dev/null 2>&1; then + echo "BACKUP_LXC_PATH=/var/backups/lxc" >> /etc/default/lxc +fi + + +if ! 
grep ^BACKUP_SNAPSHOT_PATH= /etc/default/datastore >/dev/null 2>&1; then + echo "BACKUP_SNAPSHOT_PATH=/var/backups/snapshot" >> /etc/default/datastore +fi + + +## +## Mirror dir's logrotate and rsyslog's entry +## + + +mkdir -p /etc/mirror-dir +cat < /etc/mirror-dir/config.yml +default: + sources: + - /etc + - /opt + - /root + - /var/log + - /var/lib/lxc + - /home + - /boot + - /srv + - /var/backups/lxc/latest + +/var/lib/lxc: + exclude: + - /*/rootfs +EOF + + +cat < /etc/rsyslog.d/mirror-dir.conf + +if \$programname == 'mirror-dir' then { + action(type="omfile" file="/var/log/mirror-dir.log") + stop +} + +EOF + +service rsyslog restart + + +cat < /etc/logrotate.d/mirror-dir.log +/var/log/mirror-dir.log +{ + weekly + missingok + dateext + dateyesterday + dateformat _%Y-%m-%d + extension .log + rotate 52 + compress + delaycompress + notifempty + create 640 root root + sharedscripts + postrotate + reload rsyslog >/dev/null 2>&1 || true + endscript +} + +EOF diff --git a/precise/mirror/hooks/install b/precise/mirror/hooks/install index c6095194..76de81d1 100755 --- a/precise/mirror/hooks/install +++ b/precise/mirror/hooks/install @@ -7,7 +7,7 @@ set -eux # -x for verbose logging to juju debug-log ## kal-manage provides the script /usr/lib/kal/dusk/sbin/ssh-cmd-validate ## used to validate any entrant connection to SSH. 
-apt-get install -y --force-yes rsync kal-manage +apt-get install -y rsync kal-manage mkdir -p /var/mirror diff --git a/precise/pypi-cacher/hooks/install b/precise/pypi-cacher/hooks/install index 364e6f1e..bd9a8e18 100755 --- a/precise/pypi-cacher/hooks/install +++ b/precise/pypi-cacher/hooks/install @@ -2,7 +2,7 @@ set -eux -apt-get install -y --force-yes python-pip +apt-get install -y python-pip pip install devpi-server diff --git a/precise/svn/hooks/install b/precise/svn/hooks/install index 5c9cff74..e831c1f9 100755 --- a/precise/svn/hooks/install +++ b/precise/svn/hooks/install @@ -3,7 +3,7 @@ set -eux # -x for verbose logging to juju debug-log -apt-get install -y --force-yes kal-shlib-pretty kal-scripts subversion +apt-get install -y kal-shlib-pretty kal-scripts subversion mkdir -p /var/svn diff --git a/precise/vpn/hooks/install b/precise/vpn/hooks/install index c64b4508..033a8835 100755 --- a/precise/vpn/hooks/install +++ b/precise/vpn/hooks/install @@ -2,7 +2,7 @@ set -eux -apt-get install -y --force-yes wget git kal-scripts python +apt-get install -y wget git kal-scripts python if test -z "${RELEASE:-}"; then if type -p lsb_release; then @@ -19,7 +19,7 @@ fi # ## Update only this repo: # apt-get update -o Dir::Etc::sourcelist="sources.list.d/swupdate.openvpn.net.list" \ # -o Dir::Etc::sourceparts="-" -o APT::Get::List-Cleanup="0" -# apt-get -y --force-yes install openvpn +# apt-get -y install openvpn export DEBIAN_FRONTEND=noninteractive DEBCONF_NONINTERACTIVE_SEEN=true dpkg -i deb/openvpn_*.deb || true @@ -45,7 +45,7 @@ mkdir -p /var/run/openvpn /var/log/openvpn mkdir -p /opt/apps ( - apt-get install -y --force-yes python-setuptools python-twisted python-crypto python-yaml python-pyptlib + apt-get install -y python-setuptools python-twisted python-crypto python-yaml python-pyptlib cd /opt/apps && git clone https://git.torproject.org/pluggable-transports/obfsproxy.git && cd obfsproxy && @@ -54,7 +54,7 @@ mkdir -p /opt/apps ## obfs4proxy does not work with 
OpenVPN for now. # ( -# apt-get install --force-yes -y golang && +# apt-get install -y golang && # cd /opt/apps && # mkdir obfs4 && # cd obfs4 && diff --git a/rocketchat/README.org b/rocketchat/README.org new file mode 100644 index 00000000..c02c9f88 --- /dev/null +++ b/rocketchat/README.org @@ -0,0 +1,60 @@ +# -*- ispell-local-dictionary: "english" -*- +#+SETUPFILE: ~/.emacs.d/etc/setup/latex.setup +#+SETUPFILE: ~/.emacs.d/etc/setup/html-readtheorg-local.setup + +#+TITLE: Rocket.Chat + +* Updating the charm to a new version + +We are using official image. Latest tags usually. + +** Test new version + +Rocket.chat has a powerfull and working database update mecanism that +will take care of migrating database on startup. + +*** Get latest available versions + +You can double-check available candidate for official images like this: + +#+begin_src sh +docker-tags-fetch rocketchat/rocket.chat -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV +#+end_src + +Check/Choose the version you want to test. + +*** Modify your own =compose.yml= + +By adding these 2 lines in your rocket chat service: + +#+begin_src yaml + docker-compose: + image: rocketchat/rocket.chat:X.Y.Z +#+end_src + +Replace X.Y.Z by the target version you want to test. + +Launch =compose up=. + +Be ready to wait a few minutes after =compose up= finished before the +service to be available: rocketchat is expected to take some time to +migrate. + + +** Change the current charm to include new version + +To prepare the commit for next version, you can run the following +on the repository you'll use to push the new commit. 
+ +#+begin_src sh +BASENAME=rocketchat/rocket.chat +VERSION=$(docker-tags-fetch "$BASENAME" -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV | head -n 1) +echo Last version of rocket chat: $VERSION +docker pull rocketchat/rocket.chat:"$VERSION" && +docker tag rocketchat/rocket.chat:"$VERSION" docker.0k.io/rocketchat:"$VERSION" && +docker push docker.0k.io/rocketchat:"$VERSION" && +sed -ri "s%^(docker-image: docker.0k.io/rocketchat:).*%\1$VERSION%" metadata.yml && +sed -ri "s%^(#docker-image: rocketchat/rocket.chat:).*%\1$VERSION%" metadata.yml +#+end_src + +You can review the changes and commit them. \ No newline at end of file diff --git a/rocketchat/metadata.yml b/rocketchat/metadata.yml index 59d7857a..e555b5da 100644 --- a/rocketchat/metadata.yml +++ b/rocketchat/metadata.yml @@ -1,7 +1,7 @@ summary: "Rocket Chat server" maintainer: "Valentin Lab " -#docker-image: rocket.chat:3.6.3 -docker-image: docker.0k.io/rocketchat:3.6.3 +#docker-image: rocketchat/rocket.chat:3.18.1 +docker-image: docker.0k.io/rocketchat:3.18.1 data-resources: - /app/uploads uses: diff --git a/rsync-backup-target/README.org b/rsync-backup-target/README.org new file mode 100644 index 00000000..bc8c6292 --- /dev/null +++ b/rsync-backup-target/README.org @@ -0,0 +1,125 @@ +#+PROPERTY: Effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d +#+PROPERTY: Max_effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d +#+PROPERTY: header-args:python :var filename=(buffer-file-name) +#+PROPERTY: header-args:sh :var filename=(buffer-file-name) +#+TODO: TODO WIP BLOCKED | DONE CANCELED +#+LATEX_HEADER: \usepackage[margin=0.5in]{geometry} +#+LaTeX_HEADER: \hypersetup{linktoc = all, colorlinks = true, urlcolor = DodgerBlue4, citecolor = PaleGreen1, linkcolor = blue} +#+LaTeX_CLASS: article +#+OPTIONS: H:8 ^:nil prop:("Effort" "Max_effort") tags:not-in-toc +#+COLUMNS: %50ITEM %Effort(Min Effort) %Max_effort(Max Effort) + +#+TITLE: rsync-backup-target + +#+LATEX: \pagebreak + +Usage of this service + +#+LATEX: 
\pagebreak + +#+LATEX: \pagebreak + + +* Configuration example + + +#+begin_src yaml +rsync-backup-target: + # docker-compose: + # ports: + # - "10023:22" + options: + admin: ## These keys are for the allowed rsync-backup to write stuff with rsync + myadmin: + - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDESdz8bWtVcDQJ68IE/KpuZM9tAq\ + ZDXGbvEVnTg16/yWqBGQg0QZdDjISsPn7D3Zr64g2qgD9n7EZghfGP9TkitvfrBYx8p\ + 7JkkUyt8nxklwOlKZFD5b3PF2bHloSsmjnP8ZMp5Ar7E+tn1guGrCrTcFIebpVGR3qF\ + hRN9AlWNR+ekWo88ZlLJIrqD26jbWRJZm4nPCgqwhJwfHE3aVwfWGOqjSp4ij+jr2ac\ + Arg7eD4clBPYIqKlqbfNRD5MFAH9sbB6jkebQCAUwNRwV7pKwCEt79HnCMoMjnZh6Ww\ + 6TlHIFw936C2ZiTBuofMx7yoAeqpifyzz/T5wsFLYWwSnX rsync@zen" +#+end_src + +* ssh API +** Adding new keys for backup + +This can be done through the admin accounts configured in =compose.yml=. + +You can use then =ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key=: + +#+begin_example +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key add "ssh-rsa AAA...Jdhwhv rsync@sourcelabel" +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls +..Jdhwhv sourcelabel +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key rm sourcelabel +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls +$ +#+end_example + +** Requesting a recover only key + +*** as an admin + +As an admin, by requesting a recover-only key on an ident that you +own, you are allowed to read (and only read) the content of the given +ident. This will allow you to give the credentials to any new host to +have a direct read access so-as to deploy the backup on a new host. + +#+begin_example +$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key request-recovery-key myident > /tmp/private_key +$ chmod 500 /tmp/private_key +$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \ + -azvArH --delete --delete-excluded \ + --partial --partial-dir .rsync-partial \ + --numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc +#+end_example + +This key will expire after 15 mn of the last recovery. 
+ +*** as a standard backup account + +With a standard backup account, you can log on as =rsync= user and +request without any arguments a recovery key. Indeed, every standard +backup account is tied to one backup identifier only. So the recover +key received will be for this backup identifier only. + +You'll probably want to use the received key from another computer to +restore the backup for instance. + +#+begin_example +$ ssh rsync@$RSYNC_BACKUP_TARGET request-recovery-key > /tmp/private_key +$ chmod 500 /tmp/private_key +$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \ + -azvArH --delete --delete-excluded \ + --partial --partial-dir .rsync-partial \ + --numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc +#+end_example + + +* Troubleshooting + +** Faking access from client + +This should work: + +#+begin_src sh +RSYNC_BACKUP_TARGET_IP=172.18.0.2 +rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \ + /tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client1 +#+end_src + +** Direct ssh access should be refused + +#+begin_src sh +RSYNC_BACKUP_TARGET_IP=172.18.0.2 +ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \ + "$RSYNC_BACKUP_TARGET" +#+end_src + +** Wrong directory should be refused + +#+begin_src sh +RSYNC_BACKUP_TARGET_IP=172.18.0.2 +rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \ + /tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client2 +#+end_src diff --git a/rsync-backup-target/build/Dockerfile b/rsync-backup-target/build/Dockerfile index 554f34ba..28fe600c 100644 --- a/rsync-backup-target/build/Dockerfile +++ b/rsync-backup-target/build/Dockerfile @@ -2,16 +2,18 @@ FROM alpine:3.9 MAINTAINER Valentin Lab -RUN apk add rsync sudo bash openssh-server +## coreutils is for ``date`` support of ``--rfc-3339=seconds`` argument. +## findutils is for ``find`` support of ``--newermt`` argument. 
+RUN apk add rsync sudo bash openssh-server coreutils findutils RUN ssh-keygen -A ## New user/group rsync/rsync with home dir in /var/lib/rsync -RUN mkdir -p /var/lib/rsync && \ +RUN mkdir -p /var/lib/rsync /var/log/rsync && \ addgroup -S rsync && \ adduser -S rsync -h /var/lib/rsync -G rsync && \ - chown rsync:rsync /var/lib/rsync + chown rsync:rsync /var/lib/rsync /var/log/rsync -## Without this, account is concidered locked by SSH +## Without this, account is considered locked by SSH RUN sed -ri 's/^rsync:!:/rsync:*NP*:/g' /etc/shadow ## Withouth this, force-command will not run diff --git a/rsync-backup-target/build/entrypoint.sh b/rsync-backup-target/build/entrypoint.sh index 99fb98a9..94b06546 100755 --- a/rsync-backup-target/build/entrypoint.sh +++ b/rsync-backup-target/build/entrypoint.sh @@ -12,18 +12,36 @@ RSYNC_HOME=/var/lib/rsync mkdir -p "$RSYNC_HOME/.ssh" -for f in "$KEYS"/*.pub; do - [ -e "$f" ] || continue - content=$(cat "$f") - ident="${f##*/}" - ident="${ident%.pub}" - if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then - echo "bad: '$ident'" - continue - fi - echo "command=\"/usr/local/sbin/ssh-cmd-validate \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $content" -done > "$RSYNC_HOME"/.ssh/authorized_keys -chown rsync:rsync -R "$RSYNC_HOME"/.ssh -R +if ! egrep '^[^:]+:x:101:101:' /etc/passwd; then + ## Then it is a first run of this container, users + ## need to be created. Notice that container will be + ## re-created anew if user config was changed. + for user_dir in /etc/rsync/keys/admin/* /etc/rsync/keys/recover; do + [ -d "$user_dir" ] || continue + user="${user_dir##*/}" + [ "$user" != "rsync" ] || continue + + adduser -S "$user" -h "$user_dir" -G rsync && + chown "$user":rsync "$user_dir" || { + echo "Error: couldn't create user $user or chown '$user_dir'." 
>&2 + exit 1 + } + ## Without this, account is considered locked by SSH + sed -ri "s/^$user:\!:/$user:*NP*:/g" /etc/shadow + + ## Withouth this, force-command will not run + sed -ri "s%^($user.*:)[^:]+$%\1/bin/bash%g" /etc/passwd + + done +fi + +log="/var/log/rsync/ssh-admin-cmd-validate.log" +touch "$log" +chown rsync:rsync "$log" +chmod g+rw "$log" + + +ssh-update-keys ## Give back PID 1 so that ssh can receive signals exec /usr/sbin/sshd -D -e diff --git a/rsync-backup-target/build/src/etc/sudoers.d/recover b/rsync-backup-target/build/src/etc/sudoers.d/recover new file mode 100644 index 00000000..239e2c73 --- /dev/null +++ b/rsync-backup-target/build/src/etc/sudoers.d/recover @@ -0,0 +1,7 @@ +## allow admin users to request a recovery key, this is really not +## sufficient, but the real check is done on the +## ``ssh-admin-cmd-validate`` side. + +%rsync ALL=(root) NOPASSWD: /usr/local/sbin/request-recovery-key * +%rsync ALL=(root) NOPASSWD: /bin/touch /etc/rsync/keys/recover/* +%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-update-keys diff --git a/rsync-backup-target/build/src/etc/sudoers.d/rsync b/rsync-backup-target/build/src/etc/sudoers.d/rsync index 2b8b4ee2..88c63450 100644 --- a/rsync-backup-target/build/src/etc/sudoers.d/rsync +++ b/rsync-backup-target/build/src/etc/sudoers.d/rsync @@ -2,3 +2,6 @@ ## the real check is done on the ``ssh-cmd-validate`` side. rsync ALL=(root) NOPASSWD: /usr/bin/rsync --server * . 
/var/mirror/* + +%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-key * +%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-update-keys diff --git a/rsync-backup-target/build/src/usr/local/sbin/request-recovery-key b/rsync-backup-target/build/src/usr/local/sbin/request-recovery-key new file mode 100755 index 00000000..40df8df0 --- /dev/null +++ b/rsync-backup-target/build/src/usr/local/sbin/request-recovery-key @@ -0,0 +1,76 @@ +#!/bin/bash + +RSYNC_KEY_PATH=/etc/rsync/keys +RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover + + +ANSI_ESC=$'\e[' + +NORMAL="${ANSI_ESC}0m" + +GRAY="${ANSI_ESC}1;30m" +RED="${ANSI_ESC}1;31m" +GREEN="${ANSI_ESC}1;32m" +YELLOW="${ANSI_ESC}1;33m" +BLUE="${ANSI_ESC}1;34m" +PINK="${ANSI_ESC}1;35m" +CYAN="${ANSI_ESC}1;36m" +WHITE="${ANSI_ESC}1;37m" + +DARKGRAY="${ANSI_ESC}0;30m" +DARKRED="${ANSI_ESC}0;31m" +DARKGREEN="${ANSI_ESC}0;32m" +DARKYELLOW="${ANSI_ESC}0;33m" +DARKBLUE="${ANSI_ESC}0;34m" +DARKPINK="${ANSI_ESC}0;35m" +DARKCYAN="${ANSI_ESC}0;36m" +DARKWHITE="${ANSI_ESC}0;37m" + + +ssh:mk-private-key() { + local comment="$1" + ( + tmpdir=$(mktemp -d) + chmod go-rwx "$tmpdir" + ssh-keygen -t rsa -N "" -f "$tmpdir/rsync_rsa" -C "$service_name@$host" >/dev/null + cat "$tmpdir/rsync_rsa" + rm -rf "$tmpdir" + ) +} + + +md5() { + local md5 + md5=$(cat | md5sum) + echo "${md5%% *}" +} + + +request-recovery-key() { + local label="$1" ident="$2" key public_key + + ## Admin should have claimed the ident with at least one backup key + if [ -n "$label" ] && ! [ -e "${RSYNC_KEY_PATH}/backup/$label/$ident.pub" ]; then + echo "Error: Current admin '$label' has no ident '$ident' claimed." 
>&2 + return 1 + fi + + ## Find new label + while true; do + key=$(ssh:mk-private-key "recover@$ident") + md5=$(printf "%s" "$key" | md5) + [ -e "${RECOVER_KEY_PATH}/$md5" ] || break + done + + mkdir -p "${RECOVER_KEY_PATH}" + public_key=$(ssh-keygen -y -f <(printf "%s\n" "$key")) + printf "%s %s\n" "$public_key" "recover@$ident" > "${RECOVER_KEY_PATH}/$md5.pub" + touch "${RECOVER_KEY_PATH}/$md5" + chmod go-rwx "${RECOVER_KEY_PATH}/$md5" + printf "%s\n" "$key" | tee -a "${RECOVER_KEY_PATH}/$md5" + + /usr/local/sbin/ssh-update-keys +} + + +request-recovery-key "$@" \ No newline at end of file diff --git a/rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate b/rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate new file mode 100755 index 00000000..11c937d7 --- /dev/null +++ b/rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate @@ -0,0 +1,106 @@ +#!/bin/bash + +## Note that the shebang is not used, but it's the login shell that +## will execute this command. + +exname=$(basename "$0") + +mkdir -p /var/log/rsync + +LOG="/var/log/rsync/$exname.log" + + +ssh_connection=(${SSH_CONNECTION}) +SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}" + +log() { + printf "%s [%s] %s - %s\n" \ + "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \ + >> "$LOG" +} + +log "NEW ADMIN CONNECTION" + +if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then + log "INVALID SETUP, ARG IS: '$1'" + echo "Your command has been rejected. Contact administrator." + exit 1 +fi + +label="$1" + + +reject() { + log "REJECTED: $SSH_ORIGINAL_COMMAND" + # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2 + echo "Your command has been rejected and reported to sys admin." 
>&2 + exit 1 +} + + +if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then + log "BAD CHARS DETECTED" + # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2 + reject +fi + +if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key add ssh-rsa "[a-zA-Z0-9/+]+" "[a-zA-Z0-9._-]+"@"[a-zA-Z0-9._-]+""$ ]]; then + log "ACCEPTED: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 + exec sudo /usr/local/sbin/ssh-key add "$label" "${ssh_args[@]:2}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key ls"$ ]]; then + log "ACCEPTED: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 + exec /usr/local/sbin/ssh-key ls "$label" "${ssh_args[@]:2}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key rm "[a-zA-Z0-9._-]+$ ]]; then + log "ACCEPTED: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 + exec sudo /usr/local/sbin/ssh-key rm "$label" "${ssh_args[@]:2}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key get-type "[a-zA-Z0-9._-]+$ ]]; then + log "ACCEPTED: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 + exec sudo /usr/local/sbin/ssh-key get-type "$label" "${ssh_args[@]:2}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"request-recovery-key "[a-zA-Z0-9._-]+$ ]]; then + log "ACCEPTED: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces 
(want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 + exec sudo /usr/local/sbin/request-recovery-key "$label" "${ssh_args[@]:1}" +else + + log "NOT MATCHING ANY ALLOWED COMMAND" + reject +fi + +## For other commands, like `find` or `md5`, that could be used to +## challenge the backups and check that archive is actually +## functional, I would suggest to write a simple command that takes no +## arguments, so as to prevent allowing wildcards or suspicious +## contents. Letting `find` go through is dangerous for instance +## because of the `-exec`. And path traversal can be done also when +## allowing /my/path/* by using '..'. This is why a fixed purpose +## embedded executable will be much simpler to handle, and to be honest +## we don't need much more. diff --git a/rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate b/rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate index e1940b21..8e05a2d0 100755 --- a/rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate +++ b/rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate @@ -5,30 +5,84 @@ exname=$(basename "$0") +mkdir -p /var/log/rsync + +LOG="/var/log/rsync/$exname.log" + + +ssh_connection=(${SSH_CONNECTION}) +SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}" + +log() { + printf "%s [%s] %s - %s\n" \ + "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \ + >> "$LOG" +} + +log "NEW BACKUP CONNECTION" + if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then - logger -t "$exname" "INVALID SETUP, ARG IS: '$1'" + log "INVALID SETUP, ARG IS: '$1'" echo "Your command has been rejected. Contact administrator." 
exit 1 fi +ident="$1" +log "IDENTIFIED AS $ident" + reject() { - logger -t "$exname" "REJECTED: $SSH_ORIGINAL_COMMAND" + log "REJECTED: $SSH_ORIGINAL_COMMAND" # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2 echo "Your command has been rejected and reported to sys admin." >&2 exit 1 } +sudo /usr/local/sbin/ssh-update-keys if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then + log "BAD CHARS DETECTED" # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2 reject fi -if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server -"[vloHgDtpArRzCeiLsfx\.]+(" --"[a-z-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$1"$ ]]; then - logger -t "$exname" "ACCEPTED: $SSH_ORIGINAL_COMMAND" - # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2 - exec sudo $SSH_ORIGINAL_COMMAND +if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server -"[vnloHgDtpArRzCeiLsfx\.]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"$ ]]; then + log "ACCEPTED BACKUP COMMAND: $SSH_ORIGINAL_COMMAND" + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + exec sudo "${ssh_args[@]::3}" \ + "--log-file=/var/log/rsync/target_$1_rsync.log" \ + "--log-file-format=%i %o %f %l %b" \ + "${ssh_args[@]:3}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server --sender -"[vnloHgDtpArRzCeiLsfx\.]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"(|/.*)$ ]]; then + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + last_arg="${ssh_args[@]: -1:1}" + if ! 
new_path=$(realpath "$last_arg" 2>/dev/null); then + log "FINAL PATH INVALID" + reject + fi + + if [[ "$new_path" != "$last_arg" ]] && + [[ "$new_path" != "/var/mirror/$ident/"* ]] && + [[ "$new_path" != "/var/mirror/$ident" ]]; then + log "FINAL PATH SUSPICIOUS" + reject + fi + + log "ACCEPTED RECOVER COMMAND: $SSH_ORIGINAL_COMMAND" + exec sudo "${ssh_args[@]}" +elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"request-recovery-key"$ ]]; then + log "ACCEPTED RECOVERY KEY REQUEST: $SSH_ORIGINAL_COMMAND" + + exec sudo /usr/local/sbin/request-recovery-key "" "$ident" else + log "REFUSED COMMAND AS IT DOESN'T MATCH ANY EXPECTED COMMAND" reject fi diff --git a/rsync-backup-target/build/src/usr/local/sbin/ssh-key b/rsync-backup-target/build/src/usr/local/sbin/ssh-key new file mode 100755 index 00000000..1eff6201 --- /dev/null +++ b/rsync-backup-target/build/src/usr/local/sbin/ssh-key @@ -0,0 +1,152 @@ +#!/bin/bash + +RSYNC_KEY_PATH=/etc/rsync/keys + + +ANSI_ESC=$'\e[' + +NORMAL="${ANSI_ESC}0m" + +GRAY="${ANSI_ESC}1;30m" +RED="${ANSI_ESC}1;31m" +GREEN="${ANSI_ESC}1;32m" +YELLOW="${ANSI_ESC}1;33m" +BLUE="${ANSI_ESC}1;34m" +PINK="${ANSI_ESC}1;35m" +CYAN="${ANSI_ESC}1;36m" +WHITE="${ANSI_ESC}1;37m" + +DARKGRAY="${ANSI_ESC}0;30m" +DARKRED="${ANSI_ESC}0;31m" +DARKGREEN="${ANSI_ESC}0;32m" +DARKYELLOW="${ANSI_ESC}0;33m" +DARKBLUE="${ANSI_ESC}0;34m" +DARKPINK="${ANSI_ESC}0;35m" +DARKCYAN="${ANSI_ESC}0;36m" +DARKWHITE="${ANSI_ESC}0;37m" + + +ssh-key-ls() { + local label="$1" f content + for f in "${RSYNC_KEY_PATH}"/backup/"$label"/*.pub; do + [ -e "$f" ] || continue + ident=${f##*/} + ident=${ident%.pub} + content=$(cat "$f") + key=${content#* } + key=${key% *} + printf "${DARKGRAY}..${NORMAL}%24s ${DARKCYAN}%s${NORMAL}\n" "${key: -24}" "$ident" + done +} + + +ssh-key-rm() { + local label="$1" ident="$2" delete + + delete="${RSYNC_KEY_PATH}/backup/$label/$ident.pub" + if ! [ -e "$delete" ]; then + echo "Error: key '$ident' not found." 
>&2 + return 1 + fi + rm "$delete" + + /usr/local/sbin/ssh-update-keys +} + + +ssh-key-get-type() { + local label="$1" ident="$2" key content commentary + + key="${RSYNC_KEY_PATH}/backup/$label/$ident.pub" + if ! [ -e "$key" ]; then + echo "Error: key '$ident' not found." >&2 + return 1 + fi + content=$(cat "$key") || return 1 + commentary=${content##* } + printf "%s\n" "${commentary%%@*}" +} + + +ssh-key-add() { + local label="$1" type="$2" key="$3" email="$4" + + [ "$type" == "ssh-rsa" ] || { + echo "Error: expecting ssh-rsa key type" >&2 + return 1 + } + + ## ident are unique by construction (they are struct keys) + ## but keys need to be also unique + declare -A keys + content="$type $key $email" + ident="${email##*@}" + target="${RSYNC_KEY_PATH}/backup/$label/$ident.pub" + + ## is key used already ? As key give access to a specified subdir, + ## we need to make sure it is unique. + + for key_file in "${RSYNC_KEY_PATH}/backup/"*/*.pub; do + [ -e "$key_file" ] || continue + key_content=$(cat "$key_file") + if [ "$type $key" == "${key_content% *}" ]; then + if [ "$key_file" == "$target" ]; then + echo "Provided key already present for '$ident'." >&2 + return 0 + elif [[ "$key_file" == "${RSYNC_KEY_PATH}/"*"/$label/"*.pub ]]; then + type=${key_file#"${RSYNC_KEY_PATH}/"} + type=${type%"/$label/"*.pub} + key_ident=${key_file##*/} + key_ident=${key_ident%.pub} + echo "Provided key already used as $type key for '$key_ident'." >&2 + return 1 + else + olabel=${key_file#"${RSYNC_KEY_PATH}/"*/} + olabel=${olabel%/*.pub} + echo "Specified key is already used by '$olabel' account, please pick another one." >&2 + return 1 + fi + fi + done + + mkdir -p "${target%/*}" + if [ -e "$target" ]; then + echo "Replacing key for '$ident'." 
>&2 + elif [ -e "${RSYNC_KEY_PATH}/"*"/"*"/$ident.pub" ]; then + olabel=("${RSYNC_KEY_PATH}/"*"/"*"/$ident.pub") + olabel="${olabel[0]}" + olabel=${olabel#"${RSYNC_KEY_PATH}/"*/} + olabel=${olabel%/*.pub} + echo "ident '$ident' is already reserved by '$olabel', please pick another one." >&2 + return 1 + fi + echo "$content" > "$target" + + /usr/local/sbin/ssh-update-keys +} + + + + +case "$1" in + "add") + shift + ssh-key-add "$@" + ;; + "rm") + shift + ssh-key-rm "$@" + ;; + "ls") + shift + ssh-key-ls "$@" + ;; + "get-type") + shift + ssh-key-get-type "$@" + ;; + *) + echo "Unknown command '$1'." + ;; +esac + diff --git a/rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate b/rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate new file mode 100755 index 00000000..e82356f4 --- /dev/null +++ b/rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate @@ -0,0 +1,97 @@ +#!/bin/bash + +## Note that the shebang is not used, but it's the login shell that +## will execute this command. + +RSYNC_KEY_PATH=/etc/rsync/keys +RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover + +exname=$(basename "$0") + +mkdir -p /var/log/rsync + +LOG="/var/log/rsync/$exname.log" + + +ssh_connection=(${SSH_CONNECTION}) +SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}" + +log() { + printf "%s [%s] %s - %s\n" \ + "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \ + >> "$LOG" +} + +log "NEW RECOVER CONNECTION" + +if [ -z "$1" ] || ! [[ "$1" =~ ^[a-z0-9]+$ ]]; then + log "INVALID SETUP, ARG 1 SHOULD BE MD5 AND IS: '$1'" + echo "Your command has been rejected. Contact administrator." + exit 1 +fi + +md5="$1" +log "RECOVER KEY $md5" + +if [ -z "$2" ] || ! [[ "$2" =~ ^[a-zA-Z0-9._-]+$ ]]; then + log "INVALID SETUP, IDENT IS: '$2'" + echo "Your command has been rejected. Contact administrator." 
+ exit 1 +fi + +ident="$2" +log "IDENTIFIED AS $ident" + +reject() { + log "REJECTED: $SSH_ORIGINAL_COMMAND" + # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2 + echo "Your command has been rejected and reported to sys admin." >&2 + exit 1 +} + + +if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then + log "BAD CHARS DETECTED" + # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2 + reject +fi + +if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server --sender -"[vnldHogDtpArRze\.iLsfxC]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"(|/.*)$ ]]; then + + ## Interpret \ to allow passing spaces (want to avoid possible issue with \n) + #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}" + ssh_args=(${SSH_ORIGINAL_COMMAND}) + + last_arg="${ssh_args[@]: -1:1}" + if ! new_path=$(realpath "$last_arg" 2>/dev/null); then + log "FINAL PATH INVALID" + reject + fi + + if [[ "$new_path" != "$last_arg" ]] && + [[ "$new_path" != "/var/mirror/$ident/"* ]] && + [[ "$new_path" != "/var/mirror/$ident" ]]; then + log "FINAL PATH SUSPICIOUS" + reject + fi + + sudo /usr/local/sbin/ssh-update-keys + if ! [ -e "${RECOVER_KEY_PATH}/$md5" ]; then + log "RECOVERY KEY $md5 JUST EXPIRED" + reject + fi + + log "ACCEPTED RECOVER COMMAND: $SSH_ORIGINAL_COMMAND" + sudo "${ssh_args[@]}" + errlvl="$?" + + for key_file in "${RECOVER_KEY_PATH}/$md5"{,.pub}; do + [ -e "$key_file" ] || continue + sudo touch "$key_file" ## Update modified time to keep key longer + done + + exit "$errlvl" +else + log "REFUSED COMMAND AS IT DOESN'T MATCH ANY EXPECTED COMMAND" + reject +fi diff --git a/rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys b/rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys new file mode 100755 index 00000000..aeb79b70 --- /dev/null +++ b/rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys @@ -0,0 +1,68 @@ +#!/bin/bash + +## Keep in mind possible race conditions as this script will be called +## from different place to update the access tokens. 
+ + +## +## Code +## + +RSYNC_KEY_PATH=/etc/rsync/keys +RSYNC_HOME=/var/lib/rsync +BACKUP_KEY_PATH=${RSYNC_KEY_PATH}/backup +RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover + + +mkdir -p "$RSYNC_HOME/.ssh" "$RECOVER_KEY_PATH" + +## delete old recovery keys +find "${RECOVER_KEY_PATH}" \ + -maxdepth 1 -not -newermt "-15 minutes" \ + -type f -delete + + +## +## New +## + +pid=$$ +new="$RSYNC_HOME"/.ssh/authorized_keys.tmp."$pid" +touch "$new" + +for f in "$BACKUP_KEY_PATH"/*/*.pub "$RECOVER_KEY_PATH"/*.pub; do + [ -e "$f" ] || continue + content=$(cat "$f") + if [[ "$content" == *" "*" "*@* ]]; then + ident="${content##*@}" + else + ident="${f##*/}" + ident="${ident%.pub}" + fi + if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then + echo "bad: '$ident'" >&2 + continue + fi + if [[ "$f" == "${RECOVER_KEY_PATH}"/*.pub ]]; then + basename=${f##*/} + basename=${basename%.pub} + cmd="/usr/local/sbin/ssh-recover-cmd-validate $basename" + else + cmd=/usr/local/sbin/ssh-cmd-validate + fi + echo "command=\"$cmd \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $content" +done >> "$new" + +[ -e "$RSYNC_HOME"/.ssh/authorized_keys ] && + mv "$RSYNC_HOME"/.ssh/authorized_keys{,.old} + +## XXXvlab: Atomic operation. It's the last call to this instruction +## that will prevail. There are some very special cases where some +## added key would not be added as expected: for instance an older +## call to ``ssh-update-key``, if made before a specific public key +## file was added to directory, could take a longer time to reach this +## next instruction than a more recent call (that would be after +## the specific public key was added). 
+mv "$new" "$RSYNC_HOME"/.ssh/authorized_keys + +chown rsync:rsync "$RSYNC_HOME"/.ssh -R diff --git a/rsync-backup-target/hooks/init b/rsync-backup-target/hooks/init index 04fee963..3be99433 100755 --- a/rsync-backup-target/hooks/init +++ b/rsync-backup-target/hooks/init @@ -15,41 +15,64 @@ set -e service_def=$(get_compose_service_def "$SERVICE_NAME") -keys=$(echo "$service_def" | shyaml -y get-value options.keys 2>/dev/null) || { - err "You must specify a ${WHITE}keys${NORMAL} struct to use this service" +admin_keys=$(echo "$service_def" | shyaml -y get-value options.admin 2>/dev/null) || { + err "You must specify a ${WHITE}admin${NORMAL} struct to use this service" exit 1 } -[ "$(echo "$keys" | shyaml -y get-type 2>/dev/null)" == "struct" ] || { - err "Invalid value type for ${WHITE}keys${NORMAL}, please provide a struct" +[ "$(echo "$admin_keys" | shyaml -y get-type 2>/dev/null)" == "struct" ] || { + err "Invalid value type for ${WHITE}admin${NORMAL}, please provide a struct" exit 1 } -local_path_key=/etc/rsync/keys + +rebuild-config() { + + rm -rf "$SERVICE_CONFIGSTORE/etc/rsync/keys/admin" + mkdir -p "$host_path_key" + + while read-0 ident keys; do + ident=$(e "$ident" | shyaml get-value) + if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then + err "Invalid identifier '$ident'," \ + "please use only alphanumerical char, dots, dash or underscores." 
+ exit 1 + fi + debug "Setting access keys for ${ident}" + [ "$(echo "$keys" | shyaml -y get-type 2>/dev/null)" == "sequence" ] || { + err "Invalid value type for ${WHITE}admin.$ident${NORMAL}, please provide a sequence" + echo " Received: '$keys'" >&2 + exit 1 + } + + while read-0 key; do + echo "command=\"/usr/local/sbin/ssh-admin-cmd-validate \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $key" + done < <(echo "$keys" | shyaml get-values-0) | file_put "$host_path_key/$ident/.ssh/authorized_keys" + done < <(echo "$admin_keys" | shyaml -y key-values-0) + + e "$control_users" > "$CONTROL_USERS_FILE" + +} + +local_path_key=/etc/rsync/keys/admin host_path_key="$SERVICE_CONFIGSTORE${local_path_key}" -key_nb=0 - -## ident are unique by construction (they are struct keys) -## but keys need to be also unique -declare -A keys -while read-0 ident key; do - if [ "${keys[$key]}" ]; then - err "Duplicate key: key for ident '$ident' is same as ident '${keys["$key"]}'." - exit 1 - fi - if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then - err "Invalid identifier '$ident'," \ - "please use only alphanumerical char, dots, dash or underscores." - exit 1 - fi - debug "Creating access key for ${ident}" || true - echo "$key" | file_put "$host_path_key/${ident}.pub" - keys["$key"]="$ident" -done < <(echo "$keys" | shyaml key-values-0) + + +CONTROL_USERS_FILE="$SERVICE_DATASTORE/.control-pass" +## Was it already properly propagated to database ? 
+control_users=$(H "${admin_keys}" "$(declare -f "rebuild-config")") init-config-add "\ $SERVICE_NAME: volumes: - - $host_path_key:$local_path_key:ro + - $host_path_key:$local_path_key + labels: + - compose.config_hash=$control_users " +if [ -e "$CONTROL_USERS_FILE" ] && [ "$control_users" == "$(cat "$CONTROL_USERS_FILE")" ]; then + exit 0 +fi + + +rebuild-config diff --git a/rsync-backup-target/hooks/log_rotate-relation-joined b/rsync-backup-target/hooks/log_rotate-relation-joined new file mode 100755 index 00000000..767ae34d --- /dev/null +++ b/rsync-backup-target/hooks/log_rotate-relation-joined @@ -0,0 +1,83 @@ +#!/bin/bash + +## Should be executable N time in a row with same result. + +. lib/common + +set -e + +uid=$(docker_get_uid "$SERVICE_NAME" "rsync") + + +LOGS=/var/log/rsync +mkdir -p "$SERVICE_DATASTORE/$LOGS" +touch "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log" +chown -v "$uid" "$SERVICE_DATASTORE/$LOGS" "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log" + +rotated_count=$(relation-get rotated-count 2>/dev/null) || true +rotated_count=${rotated_count:-52} + + +## XXXvlab: a lot of this intelligence should be moved away into ``logrotate`` charm +DST="$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/logrotate.d/$SERVICE_NAME" +file_put "$DST" </dev/null 2>&1; then - if ! prev_key=$(shyaml get-value "${service_name//./\\.}.options.keys.${DOMAIN//./\\.}" \ + if ! prev_key=$(shyaml get-value "${service_name//./\\.}.options.admin.${DOMAIN//./\\.}" \ < "$compose_file"); then err "Couldn't query file '$compose_file' for key of domain '$DOMAIN'." exit 1 fi if [ "${prev_key}" == "$SSH_PUBLIC_KEY" ]; then - echo "Key was already setup." + info "Key is already setup correctly." 
exit 0 fi @@ -123,19 +125,25 @@ EOF exit 1 fi echo "${WHITE}Applying these changes:${NORMAL}" - echo "$diff" - + if type -p colordiff >/dev/null; then + colordiff -u "$compose_file" <(echo "$content") + else + echo "$diff" + fi | egrep -v "^[^ ]*(---|\+\+\+)" cp "$compose_file" "${compose_file}.old" echo "$content" > "$compose_file" - ## reloading (could be much faster) - compose --debug down && compose --debug up - - if [ "$?" == 0 ]; then - echo "Added key, and restarted service ${DARKYELLOW}$service_name${NORMAL}." + if [ -z "$opt_no_reload" ]; then + ## reloading (could be much faster) + compose --debug down && compose --debug up + if [ "$?" == 0 ]; then + info "Added key, and restarted service ${DARKYELLOW}$service_name${NORMAL}." + else + err "something went wrong ! Should check the state of '$DOMAIN' !!" + exit 1 + fi else - echo "something went wrong ! Should check the state of '$DOMAIN' !!" - exit 1 + info "Added key, you'll need to restart service ${DARKYELLOW}$service_name${NORMAL}." fi } diff --git a/rsync-backup/build/Dockerfile b/rsync-backup/build/Dockerfile index 19911ff5..10aa6ec5 100644 --- a/rsync-backup/build/Dockerfile +++ b/rsync-backup/build/Dockerfile @@ -6,7 +6,7 @@ MAINTAINER Valentin Lab RUN apk add bash rsync sudo openssh-client # RUN apt-get update && \ -# DEBIAN_FRONTEND=noninteractive apt-get install --force-yes -y --no-install-recommends rsync sudo openssh-client && \ +# DEBIAN_FRONTEND=noninteractive apt-get install -y --no-install-recommends rsync sudo openssh-client && \ # apt-get clean && \ # rm -rf /var/lib/apt/lists/* diff --git a/rsync-backup/hooks/install.d/60-install.sh b/rsync-backup/hooks/install.d/60-install.sh old mode 100644 new mode 100755 index 7b080316..5854d348 --- a/rsync-backup/hooks/install.d/60-install.sh +++ b/rsync-backup/hooks/install.d/60-install.sh @@ -4,16 +4,25 @@ set -eux -[ "${DOMAIN}" ] || { +[ -n "${DOMAIN}" ] || { echo "Error: you must set \$DOMAIN prior to running this script." 
>&2 exit 1 } -[ "${BACKUP_SERVER}" ] || { +[ -n "${BACKUP_SERVER}" ] || { echo "Error: you must set \$BACKUP_SERVER prior to running this script." >&2 exit 1 } +KEY_BACKUP_ID=${KEY_BACKUP_ID:-rsync} + +KEY_COMMENTARY="$KEY_BACKUP_ID@$DOMAIN" + +MIRROR_DIR_PATH="${MIRROR_DIR_PATH:-$PWD/resources/bin/mirror-dir}" +[ -e "$MIRROR_DIR_PATH" ] || { + echo "Error: you must set \$MIRROR_DIR_PATH or be the root of the charm to run this script." >&2 + exit 1 +} ## rsync type -p rsync >/dev/null 2>&1 || apt-get install -y rsync /dev/null || chown rsync:rsync /var/lib/rsync ## rsync ssh key creation -[ -e /var/lib/rsync/.ssh/id_rsa ] || - su -c 'ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q -C rsync@'"$DOMAIN" - rsync +if [ -e /var/lib/rsync/.ssh/id_rsa.pub ]; then + ## Mainly for update of old solution + content=$(cat /var/lib/rsync/.ssh/id_rsa.pub) + commentary=${content##* } + if [ "${commentary}" != "$KEY_COMMENTARY" ]; then + echo "Updating ssh key commentary from '${commentary}' to '$KEY_COMMENTARY'" >&2 + sed -ri "s/ [^ ]+\$/ $KEY_COMMENTARY/" /var/lib/rsync/.ssh/id_rsa.pub + fi +else + su -c 'ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q -C '"$KEY_COMMENTARY" - rsync +fi dest="$BACKUP_SERVER" if [[ "$dest" == *"/"* ]]; then @@ -45,9 +63,15 @@ fi ssh-keyscan "${ssh_options[@]}" -H "${dest}" > /var/lib/rsync/.ssh/known_hosts -apt-get install kal-shlib-process /dev/null || awk --version) in + "mawk 1.3.3"*) + ## Not good, it is from 1996, and we still find it on Debian 10 + apt-get install -y gawk /etc/cron.d/mirror-dir SHELL=/bin/bash PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin -$((RANDOM % 60)) * * * * root mirror-dir -h "$DOMAIN" -d "$BACKUP_SERVER" -u rsync 2>&1 | logger -t mirror-dir +$((RANDOM % 60)) $((RANDOM % 4))-23/4 * * * root mirror-dir backup -q -h "$DOMAIN" -d "$BACKUP_SERVER" EOF diff --git a/rsync-backup/hooks/schedule_command-relation-joined b/rsync-backup/hooks/schedule_command-relation-joined index 0b8c1756..0c870600 100755 --- 
a/rsync-backup/hooks/schedule_command-relation-joined +++ b/rsync-backup/hooks/schedule_command-relation-joined @@ -41,6 +41,7 @@ $schedule root lock $label -v -D -p 10 -k -c "\ -v \"$RSYNC_CONFIG_DIR:/etc/rsync\" \ -v \"$host_path_key:$local_path_key\" \ -v \"$HOST_DATASTORE:/mnt/source\" \ + -v \"$HOST_COMPOSE_YML_FILE:/mnt/source/compose.yml\" \ --network ${PROJECT_NAME}_default \ \"$DOCKER_BASE_IMAGE\" \ /mnt/source \"$target\"" 2>&1 | ts '\%F \%T' >> /var/log/cron/${label}_script.log diff --git a/rsync-backup/metadata.yml b/rsync-backup/metadata.yml index 4afd39e8..568257e1 100644 --- a/rsync-backup/metadata.yml +++ b/rsync-backup/metadata.yml @@ -13,5 +13,5 @@ uses: default-options: ## backup every day on random time schedule: !bash-stdout | - printf "%d %d * * *" "$((RANDOM % 60))" "$((RANDOM % 6))" + printf "%d %s * * *" "$((RANDOM % 60))" "$((RANDOM % 6))-23/6" diff --git a/rsync-backup/resources/bin/mirror-dir b/rsync-backup/resources/bin/mirror-dir index 725cabc4..c8be43de 100755 --- a/rsync-backup/resources/bin/mirror-dir +++ b/rsync-backup/resources/bin/mirror-dir @@ -1,5 +1,14 @@ #!/bin/bash +## +## Here's an example crontab: +## +## SHELL=/bin/sh +## PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin +## +## 49 */2 * * * root mirror-dir run -d core-05.0k.io:10023 -u rsync /etc /home /opt/apps 2>&1 | logger -t mirror-dir +## + #:- . /etc/shlib @@ -8,189 +17,570 @@ include common include parse include process +include cmdline +include array depends shyaml lock -[ "$UID" != "0" ] && echo "You must be root." 
&& exit 1 ## -## Here's an example crontab: +## Functions ## -## SHELL=/bin/sh -## PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin + + +MIRROR_DIR_LOG=/var/log/mirror-dir.log +MIRROR_DIR_REPORT_MAX_READ_LINE=1000000 +R_DATE='[0-9]{4,4}-[01][0-9]-[0-3][0-9] [012][0-9]:[0-5][0-9]:[0-5][0-9][+-][01][0-9][0-5][0-9]' + +mirror-dir:report() { + + local s1 s2 s3 d1 d2 d3 host source sent received rate + + while read s1 s2 d1 d2 host source sent received rate; do + s=$(date -d"$s1 $s2" --rfc-3339=seconds) + s_s=$(date -d"$s1 $s2" +%s) + d_s=$(date -d"$d1 $d2" +%s) + duration=$((d_s - s_s)) + printf "%s %-15s %-30s | %s %s %s %10s\n" \ + "$s" "$host" "$source" "$sent" "$received" "$rate" "$(print_duration "$duration")" + done < <( + tail "$MIRROR_DIR_LOG" -n "$MIRROR_DIR_REPORT_MAX_READ_LINE" | + egrep "^${R_DATE} (Starting|sent)" | + sed -r 's/\s*\(.*\)$//g + s/ (([0-9]{1,3},)*[0-9]{1,3})(\.[0-9]{2,2})? bytes(\/sec)?/:\1/g + s/,//g + s/ :([0-9]+)$/ rate:\1/g' | + grep -v "^--$" | + sed -r "/Starting/N; + {s/\n(${R_DATE} )(.*)sent/ \1 sent/g}" | + sed -r "s/^(${R_DATE} )Starting rsync: ([^ ]+) -> ([^ ]+) (${R_DATE} )/\1\4\3 \2/g + s/ +/ /g + s/ [a-z]+:/ /g" | + egrep "^${R_DATE} ${R_DATE} [^ ]+ /[^ ]+ [0-9]+ [0-9]+ [0-9]+$" + ) | + numfmt --field=6,7 --to=iec-i --suffix=B --padding=8 | + numfmt --field=8 --to=iec-i --suffix=B/s --padding=10 | + sed -r 's/ \| / /g' +} + + +mirror-dir:run() { + + local hostname="$1" dests="$2" source_dirs + shift 2 + + dests=($dests) ## individual dests can't use any space-like separators + source_dirs=("$@") + + dest_path=/var/mirror/$hostname + state_dir=/var/run/mirror-dir + mkdir -p "$state_dir" + rsync_options=( + ${RSYNC_OPTIONS:-} --stats --out-format='%i %o %f %l %b') + ssh_options=(${SSH_OPTIONS:--o StrictHostKeyChecking=no}) + + for dest in "${dests[@]}"; do + dest_rsync_options=("${rsync_options[@]}") + if [[ "$dest" == *"/"* ]]; then + dest_rsync_options+=("--bwlimit" "${dest##*/}") + dest="${dest%/*}" + fi + 
dest_for_session="$dest" + + for d in "${source_dirs[@]}"; do + + current_rsync_options=("${dest_rsync_options[@]}") + + session_id="$(echo "${dest_for_session}$d" | md5_compat)" + session_id="${session_id:1:8}" + + if [[ "$dest" == *":"* ]]; then + ssh_options+=("-p" "${dest#*:}") + dest="${dest%%:*}" + fi + + dirpath="$(dirname "$d")" + if [ "$dirpath" == "/" ]; then + dir="/$(basename "$d")" + else + dir="$dirpath/$(basename "$d")" + fi + + [ -d "$dir" ] || { + warn "ignoring '$dir' as it is not existing." + continue + } + + lock_label=$exname-$hostname-${session_id} + + tmp_exclude_patterns=/tmp/${lock_label}.exclude_patterns.tmp + ## Adding the base of the dir if required... seems necessary with + ## the rsync option that replicate the full path. + has_exclude_pattern= + while read-0 exclude_dir; do + if [ -z "$has_exclude_pattern" ]; then + echo "Adding exclude patterns for source '$dir':" >&2 + has_exclude_pattern=1 + fi + if [[ "$exclude_dir" == "/"* ]]; then + exclude_dir="$dir${exclude_dir}" + fi + echo " - $exclude_dir" >&2 + p0 "$exclude_dir" + done < <(get_exclude_patterns "$dir") > "$tmp_exclude_patterns" + if [ -n "$has_exclude_pattern" ]; then + current_rsync_options+=("-0" "--exclude-from"="$tmp_exclude_patterns") + else + echo "No exclude patterns for '$dir'." + fi + echo --------------------------------- + echo "Starting rsync: $d -> $dest ($(date))" + cmd=( + nice -n 15 \ + rsync "${current_rsync_options[@]}" -azvARH \ + -e "sudo -u $user ssh ${ssh_options[*]}" \ + --delete --delete-excluded \ + --partial --partial-dir .rsync-partial \ + --numeric-ids "$dir/" "$user@$dest":"$dest_path" + ) + echo "${cmd[@]}" + start="$SECONDS" + retry=1 + errlvls=() + while true; do + lock "$lock_label" -v -D -k -- "${cmd[@]}" + errlvl="$?" + case "$errlvl" in + 20) ## Received SIGUSR1, SIGINTT + echo "!! Rsync received SIGUSR1 or SIGINT." + echo " .. 
Full interruption while $d -> $dest and after $((SECONDS - start))s" + append_trim "${state_dir}/${session_id}-fail" \ + "$dest $d $((SECONDS - start)) signal SIGUSR1, SIGINT or SIGHUP" + break 2 + ;; + 137|143) ## killed SIGKILL, SIGTERM + echo "!! Rsync received $(kill -l "$errlvl")" + echo " .. Full interruption while $d -> $dest and after $((SECONDS - start))s" + append_trim "${state_dir}/${session_id}-fail" \ + "$dest $d $((SECONDS - start)) signal: $(kill -l "$errlvl")" + break 2 + ;; + 0) + echo "Rsync finished with success $d -> $dest in $((SECONDS - start))s" + append_trim "${state_dir}/${session_id}-success" \ + "$dest $d $((SECONDS - start)) OK" + break + ;; + *) + errlvls+=("$errlvl") + echo "!! Rsync failed with an errorlevel $errlvl after $((SECONDS - start))s since start." + if [ "$retry" -lt 3 ]; then + echo "!! Triggering a retry ($((++retry))/3)" + continue + else + echo "!! Tried 3 times, bailing out." + echo " .. interruption of $d -> $dest after $((SECONDS - start))s" + append_trim "${state_dir}/${session_id}-fail" \ + "$dest $d $((SECONDS - start))" \ + "Failed after 3 retries (errorlevels: ${errlvls[@]})" + break + fi + ;; + esac + done + if [ -n "$has_exclude_pattern" ]; then + rm -fv "$tmp_exclude_patterns" + fi + done + done + +} + + + +get_exclude_patterns() { + local dir="$1" + [ -e "$config_file" ] || return + cat "$config_file" | shyaml get-values-0 "${dir//.\\./}.exclude" 2>/dev/null +} + +append_trim() { + local f="$1" + shift + e "$(date --rfc-3339=s) $*"$'\n' >> "$f" && + tail -n 5000 "$f" > "$f".tmp && + mv "$f"{.tmp,} +} + + +log_tee() { tee -a "$MIRROR_DIR_LOG"; } +log_file() { cat >> "$MIRROR_DIR_LOG"; } + + +get_ids() { + local session_id id_done + declare -A id_done + for file in "$state_dir"/*{-fail,-success}; do + session_id=${file%-*} + [ "${id_done["$session_id"]}" ] && continue + id_done["$session_id"]=1 + echo "${session_id##*/}" + done +} + + +mirror-dir:_get_sources() { + local DIR=("$@") + + 
config_file="/etc/$exname/config.yml" + + if [ "${#DIR[@]}" == 0 ]; then + if [ -e "$config_file" ]; then + info "No source provided on command line," \ + "reading '$config_file' for default sources" + DIR=($(eval echo $(shyaml get-values default.sources < "$config_file"))) + fi + fi + array_values_to_stdin DIR +} + + +[[ "${BASH_SOURCE[0]}" != "${0}" ]] && SOURCED=true + +version=0.1 +desc='Manage mirroring of local directory to distant hosts' +help="" + + + ## -## 49 */2 * * * root mirror-dir -d core-05.0k.io:10023 -u rsync /etc /home /opt/apps 2>&1 | logger -t mirror-dir +## Code ## -usage="usage: $exname -d DEST1 [-d DEST2 [...]] [-u USER] [DIR1 [DIR2 ...]] -Preserve as much as possible the source structure, keeping hard-links, acl, -exact numerical uids and gids, and being able to resume in very large files. +cmdline.spec.gnu -Options: - DIR1 ... DIRn - Local directories that should be mirrored on destination(s). - examples: /etc /home /var/backups +cmdline.spec.gnu backup - If no directories are provided, the config file root - entries will be used all as destination to copy. +cmdline.spec:backup:valued:-d,--dest:run() { + dests+=("$1") +} - -d DESTn - Can be repeated. Specifies host destination towards which - files will be send. Note that you can specify port number after - a colon and a bandwidth limit for rsync after a '/'. +dests=() - examples: -d liszt.musicalta:10022 -d 10.8.0.19/200 +cmdline.spec::cmd:backup:run() { - -u USER (default: 'backuppc') +# usage="usage: $exname -d DEST1 [-d DEST2 [...]] [-u USER] [DIR1 [DIR2 ...]] - Local AND destination user to log as at both ends to transfer file. - This local user need to have a NOPASSWD ssh login towards it's - account on destination. This destination account should have - full permissions access without passwd to write with rsync-server - in the destination directory. 
+# Preserve as much as possible the source structure, keeping hard-links, acl, +# exact numerical uids and gids, and being able to resume in very large files. - -h STORE (default is taken of the hostname file) - Set the destination store, this is the name of the directory where - the files will all directories will be copied. Beware ! if 2 hosts - use the same store, this means they'll conflictingly update the - same destination directory. Only use this if you know what you - are doing. +# " -" + : :posarg: [DIR...] 'Local directories that should be mirrored + on destination(s). -dests=() -source_dirs=() -hostname= -while [ "$#" != 0 ]; do - case "$1" in - "-d") - dests+=("$2") - shift - ;; - "-h") - hostname="$2" - shift - ;; - "-u") - user="$2" - shift - ;; - *) - source_dirs+=("$1") - ;; - esac - shift -done + Examples: /etc /home /var/backups + + If no directories are provided, the config + file root entries will be used all as + destination to copy.' + + : :optval: -d,--dest 'Can be repeated. Specifies host + destination towards which files will be + send. Note that you can specify port + number after a colon and a bandwidth limit + for rsync after a '/'. + + Examples: -d liszt.musicalta:10022 + -d 10.8.0.19/200' + + + : :optval: -u,--user "(default: 'rsync') + + Local AND destination system user to log + as at both ends to transfer file. This + local user need to have a no password ssh + login to it's own account on destination. + This destination account should have full + permissions access without passwd to write + with rsync-server in the destination + directory." + : :optval: -h,--hostname "(default is taken of the hostname file) -if test -z "$hostname"; then - hostname=$(hostname) -fi + Set the destination store, this is the + name of the directory where the files + will all directories will be copied. + Beware ! if 2 hosts use the same store, + this means they'll conflictingly update + the same destination directory. 
Only + use this if you know what you are + doing." -if test -z "$hostname"; then - die "Couldn't figure a valid hostname. Please specify one with \`\`-h STORENAME\`\`." -fi + : :optfla: -q,--quiet "Prevent output on stderr. Please note that + output is always written in log file." -user=${user:-backuppc} -dest_path=/var/mirror/$hostname -config_file="/etc/$exname/config.yml" + [ "$UID" != "0" ] && echo "You must be root." && exit 1 -if [ "${#source_dirs[@]}" == 0 ]; then - if [ -e "$config_file" ]; then - echo "No source provided on command line.. " - echo " ..so reading '$config_file' for default sources..." - source_dirs=($(eval echo $(shyaml get-values default.sources < "$config_file"))) + [ -n "$opt_hostname" ] || opt_hostname=$(hostname) + + if [ -n "$opt_quiet" ]; then + log_facility=log_file + else + log_facility=log_tee + fi + + if [ -z "$opt_hostname" ]; then + err "Couldn't figure a valid hostname. Please specify one with \`\`-h STORENAME\`\`." + return 1 fi - if [ "${#source_dirs[@]}" == 0 ]; then + + user=${opt_user:-rsync} + + config_file="/etc/$exname/config.yml" + + array_read-0 DIR < <( + { + { + mirror-dir:_get_sources "${DIR[@]}" + } 3>&1 1>&2 2>&3 | "$log_facility" + } 3>&1 1>&2 2>&3 + ) + + if [ "${#DIR[@]}" == 0 ]; then err "You must specify at least one source directory to mirror" \ - "on command line (or in a config file)." - print_usage + "on command line (or in a config file)." + echo "$usage" >&2 exit 1 fi -fi -echo "Sources directories are: ${source_dirs[@]}" + info "Source directories are: ${DIR[@]}" 2>&1 | "$log_facility" -if [ "${#dests[@]}" == 0 ]; then - err "You must specify at least a destination." - print_usage - exit 1 -fi + if [ "${#dests[@]}" == 0 ]; then + err "You must specify at least a destination (using \`\`-d\`\` or \`\`--dest\`\`)." 
+ echo "$usage" >&2 + return 1 + fi -rsync_options=(${RSYNC_OPTIONS:-}) -ssh_options=(${SSH_OPTIONS:-}) + ## XXXvlab: note that we use here a special version of awk supporting + ## ``strftime``. This is only to prefix a date to the logs. Yes, we know + ## about ``--out-format`` and its ``%t`` which would be ideal, but it + ## doesn't output proper UTC time (it is system time, no timezone info). + mirror-dir:run "$opt_hostname" "${dests[*]}" "${DIR[@]}" 2>&1 | + awk -W interactive '{ print strftime("%Y-%m-%d %H:%M:%S%z"), $0 }' | + "$log_facility" -get_exclude_patterns() { - local dir="$1" - [ -e "$config_file" ] || return - cat "$config_file" | shyaml get-values-0 "$(echo "$dir" | sed -r 's%\.%\\.%g').exclude" } -for dest in "${dests[@]}"; do - for d in "${source_dirs[@]}"; do - current_rsync_options=("${rsync_options[@]}") + +cmdline.spec.gnu report +cmdline.spec::cmd:report:run() { + mirror-dir:report +} + + + +cmdline.spec:check:valued:-d,--dest:run() { + dests+=("$1") +} + +cmdline.spec.gnu check +cmdline.spec::cmd:check:run() { + +# usage="usage: $exname -d DEST1 [-d DEST2 [...]] [DIR1 [DIR2 ...]] + +# Checks that mirror-dir did it's job. Will send an email if not. +# " + + + : :posarg: [DIR...] 'Local directories that should be mirrored + on destination(s). + + Examples: /etc /home /var/backups + + If no directories are provided, the config + file root entries will be used all as + destination to copy.' + + : :optval: -d,--dest 'Can be repeated. Specifies host + destination towards which files will be + send. Note that you can specify port + number after a colon and a bandwidth limit + for rsync after a '/'. + + Examples: -d liszt.musicalta:10022 + -d 10.8.0.19/200' + + : :optval: -n,--time-spec "Give a full English time spec about how + old the last full run of rsync should + be at most. Defaults to '12 hours'. + + Examples: -n '12 hours' + -n '1 day'" + + : :optfla: -m,--mail-alert "Send alert via email. This is intended to + use in cron." 
+ + + [ "$UID" != "0" ] && echo "You must be root." && exit 1 + + if [ "${#dests[@]}" == 0 ]; then + err "You must specify at least a destination (using \`\`-d\`\` or \`\`--dest\`\`)." + echo "$usage" >&2 + return 1 + fi + + if [ -n "$opt_mail_alert" ]; then + CHECK_DEFAULT_SOURCE=/etc/default/alerting + [ -f "$CHECK_DEFAULT_SOURCE" ] && . "$CHECK_DEFAULT_SOURCE" + + if [ "${#MAIL_DESTS[@]}" == 0 ]; then + echo "You must set at least one recipient destination for mails." >&2 + echo " You can do that in '$CHECK_DEFAULT_SOURCE', using the variable" >&2 + echo " '\$MAIL_DESTS'. Note this is a bash array variable." >&2 + exit 1 + fi + fi + + array_read-0 DIR < <(mirror-dir:_get_sources "${DIR[@]}") + + if [ "${#DIR[@]}" == 0 ]; then + err "You must specify at least one source directory to mirror" \ + "on command line (or in a config file)." + echo "$usage" >&2 + exit 1 + fi + + time_spec="${opt_time_spec:-12 hours}" + + state_dir=/var/run/mirror-dir + + ## Getting max string length of source + dir_max_len=0 + for d in "${DIR[@]}"; do + [ "$dir_max_len" -lt "${#d}" ] && + dir_max_len="${#d}" + done + + ## Getting max string length of dests + dest_max_len=0 + for d in "${dests[@]}"; do + [ "$dest_max_len" -lt "${#d}" ] && + dest_max_len="${#d}" + done + + declare -A sessions=() + bad_sessions=() + msg=() + for dest in "${dests[@]}"; do if [[ "$dest" == *"/"* ]]; then current_rsync_options+=("--bwlimit" "${dest##*/}") dest="${dest%/*}" fi - if [[ "$dest" == *":"* ]]; then - ssh_options+=("-p" "${dest#*:}") - dest="${dest%%:*}" - fi + for d in "${DIR[@]}"; do + session_id="$(echo "$dest$d" | md5_compat)" + session_id="${session_id:1:8}" + sessions["$session_id"]="$dest $d" + f=$(find "$state_dir" \ + -maxdepth 1 -newermt "-$time_spec" \ + -type f -name "${session_id}-success") + if [ -z "$f" ]; then + if [ -e "$state_dir/${session_id}-success" ]; then + msg+=("$(printf "%-${dest_max_len}s %-${dir_max_len}s last full sync %s" \ + "$dest" "$d" \ + "$(stat -c %y 
"$state_dir/${session_id}-success" | + sed -r 's/\.[0-9]{9,9} / /g')")") + else + msg+=("$(printf "%-${dest_max_len}s %-${dir_max_len}s never finished yet" \ + "$dest" "$d")") + fi + bad_sessions+=("$session_id") + fi + done + done - dirpath="$(dirname "$d")" - if [ "$dirpath" == "/" ]; then - dir="/$(basename "$d")" - else - dir="$dirpath/$(basename "$d")" - fi + [ "${#msg[@]}" == 0 ] && return 0 + + if [ -z "$opt_mail_alert" ]; then + echo + echo "${DARKRED}These destination/source directory were" \ + "last synced more than $time_spec ago:${NORMAL}" + for m in "${msg[@]}"; do + printf " %s\n" "$m" + done + echo + echo "${DARKRED}Last failed logs:${NORMAL}" + for m in "${bad_sessions[@]}"; do + if [ -e "${state_dir}"/$m-fail ]; then + echo " ${sessions[$m]}:" + tail -n 5 "${state_dir}"/$m-fail | cut -f 1,2,5- -d " " | sed -r "s/^/ /g" + echo + else + echo " ${sessions[$m]}: no fail log available" + fi + done + return 1 + fi - [ -d "$dir" ] || { - warn "ignoring '$dir' as it is not existing." - continue - } - lock_label=$exname-$hostname-$(echo "$dest" | md5_compat | cut -f 1 -d " ") + ## + ## Mail + ## - exclude_patterns="$(get_exclude_patterns "$dir")" + if [ "${#msg[@]}" != 0 ]; then - tmp_exclude_patterns=/tmp/${lock_label}.$(echo "$d" | md5_compat | cut -f 1 -d " ").exclude_patterns.tmp - if [ "$exclude_patterns" ]; then - echo "Adding exclude patterns..." + cat < "$tmp_exclude_patterns" - cat "$tmp_exclude_patterns" | xargs -0 -n 1 echo - current_rsync_options=("-0" "--exclude-from"="$tmp_exclude_patterns" "${current_rsync_options[@]}") + Some configured mirroring targets have not finished gracefully in + the last $time_spec. 
Please see for yourself: + +$( + for m in "${msg[@]}"; do + echo " $m" + done +) + + You might want to find these following information of some use: + +$( + + for m in "${bad_sessions[@]}"; do + if [ -e "${state_dir}"/$m-fail ]; then + echo " ${sessions[$m]}:" + tail -n 5 "${state_dir}"/$m-fail | cut -f 1,2,5- -d " " | sed -r "s/^/ /g" + echo else - echo "No exclude patterns for '$dir'." + echo " ${sessions[$m]}: no fail log available" fi + done - echo --------------------------------- - date +) - echo nice -n 15 rsync "${current_rsync_options[@]}" -azvARH -e "'sudo -u $user ssh ${ssh_options[*]}'" --delete --delete-excluded --partial --partial-dir .rsync-partial --numeric-ids "$dir/" "$user@$dest":"$dest_path" + Hoping all this will help you sort out the issue... - lock "$lock_label" -v -D -k -- \ - nice -n 15 \ - rsync "${current_rsync_options[@]}" -azvARH \ - -e "sudo -u $user ssh ${ssh_options[*]}" \ - --delete --delete-excluded --partial --partial-dir .rsync-partial \ - --numeric-ids "$dir/" "$user@$dest":"$dest_path" + Yours sincerly, +-- +mirror-dir-check - rm -fv "$tmp_exclude_patterns" - done -done +PS: You received this email because your email is listed in +\$MAIL_DESTS of '$CHECK_DEFAULT_SOURCE' of '$(hostname)' +(also known as $(cat /etc/mailname)). 
+
+EOF
+
+    fi
+
+
+}
+
+
+
+
+cmdline::parse "$@"
diff --git a/searx/hooks/web_proxy-relation-joined b/searx/hooks/web_proxy-relation-joined
new file mode 100755
index 00000000..20c0c918
--- /dev/null
+++ b/searx/hooks/web_proxy-relation-joined
@@ -0,0 +1,12 @@
+#!/bin/bash
+
+set -e
+
+URL=$(relation-get url) || exit 1
+
+config-add "\
+services:
+  $MASTER_BASE_SERVICE_NAME:
+    environment:
+      BASE_URL: $URL
+"
diff --git a/searx/metadata.yml b/searx/metadata.yml
new file mode 100644
index 00000000..ce183af5
--- /dev/null
+++ b/searx/metadata.yml
@@ -0,0 +1,16 @@
+docker-image: docker.0k.io/searx:0.18.0-237 ## from: searx/searx:0.18.0-237-e2fb5008
+config-resources:
+  - /etc/searx
+
+default-options:
+
+uses:
+  web-proxy:
+    #constraint: required | recommended | optional
+    #auto: pair | summon | none ## default: pair
+    constraint: recommended
+    auto: pair
+    solves:
+      proxy: "Public access"
+    default-options:
+      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:8080
diff --git a/sftp/README.org b/sftp/README.org
new file mode 100644
index 00000000..f8c38b07
--- /dev/null
+++ b/sftp/README.org
@@ -0,0 +1,75 @@
+* Presentation
+
+This charm allows you to host an SFTP (using ssh) with its own user
+database. They can be authenticated with a password or with an SSH key.
+
+You can choose exactly what data will be accessible to them by mount
+binding each directory you want to share from the host in their own
+home directory in the container (see the examples).
+
+The permissions should be managed through group permissions, directly
+from the host and in the shared directory.
+
+Each user in the container will be part of multiple groups
+(configurable via the options of the charm in your service definition
+of the =compose.yml=), and the GID of the groups will be the same on
+the host and on the container.
+
+* Example configuration
+
+#+begin_src yaml
+  sftp:
+    docker-compose:
+      ports:
+        - "10622:22"
+      volumes:
+        ## Here we allow access to specific directories only by binding
+        ## them in their home directory:
+        - /srv/datastore/data/www/var/www/www.myclientwebsite.com:/home/myclient1/www.myclientwebsite.com:rw
+        - /srv/datastore/data/www/var/www/www.myclientwebsite.com:/home/myclient2/www.myclientwebsite.com:rw
+    options:
+      users:
+        myclient1:
+          ## These groups are created on the container with the given GID
+          ## Note that UID/GID are the same for the container and the host,
+          ## So don't forget to give the appropriate rights from the host on
+          ## the shared directory to ensure that access is effectively granted
+          ## as you want to the customer
+          groups:
+            - sftpaccess-rw:3000
+          password: FaKePaSSw0rdT0Ch4Ng3
+          keys:
+            - "ssh-rsa AAAAB3NzaC2yc2Z..."
+        myclient2:
+          ## These groups are created on the container with the given GID
+          ## Note that UID/GID are the same for the container and the host,
+          ## So don't forget to give the appropriate rights from the host on
+          ## the shared directory to ensure that access is effectively granted
+          ## as you want to the customer
+          groups:
+            - sftpaccess-rw:3000
+          password: FaKePaSSw0rdT0Ch4Ng3
+          keys:
+            - "ssh-rsa AAAAB3NzBC1yc2X..."
+#+end_src
+
+
+In this case, you'll also need to make sure to correctly set up the
+directories you shared; in this example, only
+=/srv/datastore/data/www/var/www/www.myclientwebsite.com= is shared:
+you are expected to set the permissions of the group identified by the
+id `3000`.
+
+Using getfacl/setfacl is the right tool most of the time. If you don't
+have it:
+
+#+begin_src sh
+apt-get install acl
+#+end_src
+
+Then, you could:
+
+#+begin_src sh
+find /srv/datastore/data/www/var/www/www.myclientwebsite.com -type d \
+    -exec setfacl -m g:3000:rwx,d:g:3000:rwx {} \;
+#+end_src