forked from 0k/0k-charms
Merge pull request 'maj master' (#1) from 0k/0k-charms:master into master
Reviewed-on: https://git.myceliandre.fr/StephanSainleger/0k-charms/pulls/1
StephanSainleger
3 years ago
102 changed files with 3097 additions and 483 deletions
17   apache/build/Dockerfile
4    bitwarden/metadata.yml
12   codimd/hooks/init
4    cron/build/Dockerfile
8    cron/hooks/init
20   cron/hooks/pre_deploy
4    cron/metadata.yml
2    cyclos/hooks/init
63   cyclos/hooks/pre_deploy
61   cyclos/lib/common
6    cyclos/metadata.yml
BIN  cyclos/src/init.sql.gz
14   drone/metadata.yml
33   etherpad/README.org
39   etherpad/hooks/init
26   etherpad/hooks/postgres_database-relation-joined
53   etherpad/metadata.yml
3    gitea/metadata.yml
47   gogocarto/README.org
4    gogocarto/hooks/init
2    gogocarto/hooks/mongo_database-relation-joined
11   gogocarto/hooks/publish_dir-relation-joined
44   gogocarto/hooks/schedule_commands-relation-joined
44   gogocarto/lib/common
5    gogocarto/metadata.yml
12   hedgedoc/hooks/init
0    hedgedoc/hooks/postgres_database-relation-joined
22   hedgedoc/hooks/web_proxy-relation-joined
4    hedgedoc/metadata.yml
2    logrotate/build/src/entrypoint.sh
19   mariadb/build/Dockerfile
20   mariadb/build/src/entrypoint.sh
48   mariadb/hooks/init
102  mariadb/hooks/install.d/60-backup.sh
3    mariadb/hooks/schedule_command-relation-joined
4    mariadb/metadata.yml
42   mariadb/resources/bin/mysql-backup
27   monujo/hooks/init
17   monujo/metadata.yml
4    mysql/hooks/install
13   nextcloud/build/Dockerfile
14   nextcloud/build/database-accept-dots.patch
2    nextcloud/metadata.yml
50   odoo-tecnativa/actions/install
20   onlyoffice/hooks/init
3    onlyoffice/hooks/nextcloud_app-relation-joined
34   onlyoffice/hooks/postgres_database-relation-joined
36   onlyoffice/metadata.yml
14   peertube/build/Dockerfile
26   peertube/build/dbname.patch
1    peertube/hooks/init
2    peertube/hooks/postgres_database-relation-joined
2    postgres/metadata.yml
4    precise/0k-odoo-light/hooks/install
2    precise/apt-cacher/hooks/install
42   precise/base-0k/hooks/install.d/00-base.sh
2    precise/base-0k/hooks/install.d/05-shyaml.sh
6    precise/base-0k/hooks/install.d/20-kal-scripts.sh
80   precise/base-0k/hooks/install.d/30-customize.sh
2    precise/ca/hooks/install
2    precise/git/hooks/install
4    precise/host/hooks/install.d/38-ntp.sh
3    precise/host/hooks/install.d/39-logrotate.sh
5    precise/host/hooks/install.d/40-btrfs.sh
45   precise/host/hooks/install.d/50-lxc.sh
38   precise/host/hooks/install.d/60-docker.sh
1    precise/host/hooks/install.d/61-mirror-dir.sh
23   precise/host/hooks/install.d/70-0k.sh
23   precise/host/hooks/install.d/75-fail2ban.sh
19   precise/host/hooks/install.d/80-dns-waterfall.sh
253  precise/host/hooks/install.d/90-shorewall.sh
37   precise/host/hooks/install.d/95-checks.sh
74   precise/host/hooks/install.d/96-backup-lxc.sh
2    precise/mirror/hooks/install
2    precise/pypi-cacher/hooks/install
2    precise/svn/hooks/install
8    precise/vpn/hooks/install
60   rocketchat/README.org
4    rocketchat/metadata.yml
125  rsync-backup-target/README.org
10   rsync-backup-target/build/Dockerfile
42   rsync-backup-target/build/entrypoint.sh
7    rsync-backup-target/build/src/etc/sudoers.d/recover
3    rsync-backup-target/build/src/etc/sudoers.d/rsync
76   rsync-backup-target/build/src/usr/local/sbin/request-recovery-key
106  rsync-backup-target/build/src/usr/local/sbin/ssh-admin-cmd-validate
66   rsync-backup-target/build/src/usr/local/sbin/ssh-cmd-validate
152  rsync-backup-target/build/src/usr/local/sbin/ssh-key
97   rsync-backup-target/build/src/usr/local/sbin/ssh-recover-cmd-validate
68   rsync-backup-target/build/src/usr/local/sbin/ssh-update-keys
73   rsync-backup-target/hooks/init
83   rsync-backup-target/hooks/log_rotate-relation-joined
12   rsync-backup-target/metadata.yml
50   rsync-backup-target/resources/bin/compose-add-rsync-key
2    rsync-backup/build/Dockerfile
38   rsync-backup/hooks/install.d/60-install.sh
1    rsync-backup/hooks/schedule_command-relation-joined
2    rsync-backup/metadata.yml
652  rsync-backup/resources/bin/mirror-dir
12   searx/hooks/web_proxy-relation-joined
@@ -1,12 +0,0 @@
#!/bin/bash

init-config-add "\
$SERVICE_NAME:
  environment:
    CMD_USECDN: \"false\"
"

## ``codimd`` create uploads folder with wrong permission

mkdir -p "$SERVICE_DATASTORE/home/hackmd/app/public/uploads"
chown -R 1500:1500 "$SERVICE_DATASTORE/home/hackmd/app/public/"
@@ -0,0 +1,20 @@
#!/bin/bash
## Should be executable N time in a row with same result.

set -e

cron_config_hash() {
    debug "Adding config hash to enable recreating upon config change."
    config_hash=$({
        find "$SERVICE_CONFIGSTORE/etc/cron"{,.hourly,.weekly,.daily,.monthly} \
            -type f -exec md5sum {} \;
    } | md5_compat) || exit 1
    init-config-add "
$MASTER_BASE_SERVICE_NAME:
  labels:
    - compose.config_hash=$config_hash
"
}


cron_config_hash || exit 1
@@ -0,0 +1,63 @@
#!/bin/bash

##
## Get domain in option of relation "web-proxy"
##

## XXXvlab: there is a tiny lapse of time where database is not yet
## installed, and admin password is the default value.


. lib/common

set -ex


admin_password=$(options-get admin-password 2>/dev/null ) || exit 1

CONTROL_PASSWORD_FILE="$SERVICE_DATASTORE/.control-pass"
## Was it already properly propagated to database ?
control_password=$(H "${admin_password}")
if ! [ -e "$CONTROL_PASSWORD_FILE" ] || [ "$control_password" != "$(cat "$CONTROL_PASSWORD_FILE")" ]; then

    hash="$(htpasswd -nbBC 10 USER "$admin_password" | cut -f 2- -d :)" || {
        err "Couldn't generate hash for admin password."
        exit 1
    }

    if ! sql < <(e "
        UPDATE passwords SET value = '$hash'
        WHERE user_id = 1
          AND status = 'ACTIVE'
          AND password_type_id in (
            SELECT id FROM password_types
            WHERE input_method = 'TEXT_BOX'
              AND password_mode = 'MANUAL');
    "); then
        debug "Failed to set password for admin users."
        exit 1
    fi
    mkdir -p "${CONTROL_PASSWORD_FILE%/*}"
    e "$control_password" > "$CONTROL_PASSWORD_FILE"
fi



url=$(named-relation-get "web-proxy" url) || exit 1

CONTROL_URL_FILE="$SERVICE_DATASTORE/.control-url"
## Was it already properly propagated to database ?
control_url=$(H "${url}")
if ! [ -e "$CONTROL_URL_FILE" ] || [ "$control_url" != "$(cat "$CONTROL_URL_FILE")" ]; then
    ## In ``configurations`` table, columns login_url, logout_url, root_url

    if ! sql < <(e "
        UPDATE configurations
        SET
          root_url = '$url'
    "); then
        debug "Failed to set root_url in configurations."
        exit 1
    fi
    e "$control_url" > "$CONTROL_URL_FILE"
fi
@@ -0,0 +1,33 @@
# -*- ispell-local-dictionary: "english" -*-

#+TITLE: Etherpad Charm

* Upgrade

Based on https://github.com/ether/etherpad-lite , following:

https://github.com/ether/etherpad-lite/blob/develop/doc/docker.md

Used:

#+begin_src sh
TAG=1.8.14
git clone https://github.com/ether/etherpad-lite --depth 1 -b $TAG
docker build --build-arg INSTALL_SOFFICE=1 \
    --build-arg ETHERPAD_PLUGINS=" \
        ep_font_family ep_mammoth ep_comments_page ep_table_of_contents \
        ep_markdown ep_image_upload ep_spellcheck ep_headings2 ep_align \
        ep_who_did_what ep_what_have_i_missed ep_embedmedia \
        ep_openid_connect ep_rss ep_git_commit_saved_revision" \
    . -t docker.0k.io/etherpad:${TAG}-0k
docker push docker.0k.io/etherpad:${TAG}-0k
#+end_src


* Admin password

We chose NOT to include the admin panel: it allows changing settings
and installing plugins, but that would not allow reproducing an
install easily. We can do this on the =compose.yml= side in a
reproducible manner, as sketched below.

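For illustration, here is a minimal sketch of what pinning such settings in =compose.yml= could look like, relying on the same =docker-compose:= / =environment:= passthrough that this charm's =metadata.yml= uses for =SOFFICE=. The variable names below are illustrative only and not taken from this repository; check the settings template of the Etherpad image actually in use.

#+begin_src yaml
etherpad:
  docker-compose:
    environment:
      ## Illustrative settings (assumed variable names), kept in
      ## compose.yml so the instance stays reproducible without
      ## touching the admin panel:
      TITLE: "Our pad"
      DEFAULT_PAD_TEXT: "Welcome to our pad"
#+end_src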
@@ -0,0 +1,39 @@
#!/bin/bash

## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.

## Accessible variables are:
## - SERVICE_NAME        Name of current service
## - DOCKER_BASE_IMAGE   Base image from which this service might be built if any
## - SERVICE_DATASTORE   Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service


. lib/common

set -e


dirs=(
    "$SERVICE_DATASTORE/var/lib/etherpad"
)

uid_gid=($(docker_get_uid_gid "$SERVICE_NAME" "etherpad" "etherpad")) || {
    err "Could not fetch uid/gid on image of service ${DARKYELLOW}$SERVICE_NAME${NORMAL}."
    return 1
}

uid="${uid_gid[0]}"
gid="${uid_gid[1]}"
for dir in "${dirs[@]}"; do
    mkdir -p "$dir"
    find "$dir" \! -uid "$uid" -print0 | while read-0 f; do
        chown -v "$uid" "$f" || return 1
    done
    find "$dir" \! -gid "$gid" -print0 | while read-0 f; do
        chgrp -v "$gid" "$f" || return 1
    done
done

@@ -0,0 +1,26 @@
#!/bin/bash

set -e

PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"

control=$(echo -en "$USER\0$DBNAME\0$PASSWORD\0$ADMIN_PASSWORD" | md5_compat)

config-add "\
services:
  $MASTER_BASE_SERVICE_NAME:
    environment:
      DB_TYPE: postgres
      DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\"
      DB_NAME: \"$DBNAME\"
      DB_PASS: \"$PASSWORD\"
      DB_USER: \"$USER\"
"

[ "$control" == "$(relation-get control 2>/dev/null)" ] && exit 0

relation-set control "$control"

info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."
@@ -0,0 +1,53 @@
name: etherpad
summary: "Etherpad-lite server"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
inherit: base-0k
## Custom built from git 1.8.14 https://github.com/ether/etherpad-lite with
## build arg --build-arg INSTALL_SOFFICE=1
docker-image: docker.0k.io/etherpad:1.8.14-soffice ## custom built from git m etherpad/etherpad
description: |
  Etherpad-lite service.

data-resources:
  - /var/lib/etherpad

docker-compose:
  command: node src/node/server.js --apikey /var/lib/etherpad/APIKEY.txt
  environment:
    SOFFICE: '/usr/bin/soffice'

uses:
  postgres-database:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: required
    auto: summon
    solves:
      database: "main storage"
    default-options:
      extensions:
        - unaccent
  web-proxy:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: recommended
    auto: pair
    solves:
      proxy: "Public access"
    default-options:
      target: !var-expand ${MASTER_BASE_SERVICE_NAME}:9001

  backup:
    constraint: recommended
    auto: pair
    solves:
      backup: "Automatic regular backup"
    default-options:
      ## First pattern matching wins, no pattern matching includes.
      ## include-patterns are checked first, then exclude-patterns
      ## Patterns rules:
      ##  - ending / for directory
      ##  - '*' authorized
      ##  - must start with a '/', will start from $SERVICE_DATASTORE
      #exclude-patterns:
      #  - "/var/lib/odoo/sessions/"
@@ -0,0 +1,44 @@
#!/bin/bash

## When writing relation script, remember:
##  - they should be idempotents
##  - they can be launched while the dockers is already up
##  - they are launched from the host
##  - the target of the link is launched first, and get a chance to ``relation-set``
##  - both side of the scripts get to use ``relation-get``.

. lib/common

set -e

## XXXvlab: should use container name here so that it could support
## multiple postgres
label=${SERVICE_NAME}
DST=$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/cron/$label

## XXXvlab: Should we do a 'docker exec' instead ?
bin_console="dc run -u www-data --rm --entrypoint \\\"$GOGOCARTO_DIR/bin/console\\\" $MASTER_BASE_SERVICE_NAME"

## Warning: 'docker -v' will use HOST directory even if launched from
## 'cron' container.
file_put "$DST" <<EOF
@daily root lock ${label}-checkvote -D -p 10 -c "\
  $bin_console app:elements:checkvote" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkvote_script.log

@daily root lock ${label}-checkExternalSourceToUpdate -D -p 10 -c "\
  $bin_console app:elements:checkExternalSourceToUpdate" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-checkExternalSourceToUpdate_script.log

@daily root lock ${label}-notify-moderation -D -p 10 -c "\
  $bin_console app:notify-moderation" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-notify-moderation_script.log


@hourly root lock ${label}-sendNewsletter -D -p 10 -c "\
  $bin_console app:users:sendNewsletter" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-sendNewsletter_script.log


*/5 * * * * root lock ${label}-webhooks-post -D -p 10 -c "\
  $bin_console --env=prod app:webhooks:post" 2>&1 | ts '\%F \%T \%Z' >> /var/log/cron/${SERVICE_NAME}-webhooks-post_script.log


EOF
chmod +x "$DST"
@@ -0,0 +1,12 @@
#!/bin/bash

init-config-add "\
$SERVICE_NAME:
  environment:
    CMD_USECDN: \"false\"
"

## ``codimd`` create uploads folder with wrong permission
uid=$(docker_get_uid "$SERVICE_NAME" "hedgedoc")
mkdir -p "$SERVICE_DATASTORE/hedgedoc/public/uploads"
chown "$uid" "$SERVICE_DATASTORE/hedgedoc/public/uploads"
@@ -0,0 +1,22 @@
#!/bin/bash

set -e

DOMAIN=$(relation-get domain) || exit 1

## These are mainly to setup the correct web-hook
if [ "$MASTER_BASE_SERVICE_NAME" == "$DOMAIN" ]; then
    ## This is because the IP will be the docker container version
    USESSL=""
else
    USESSL="CMD_PROTOCOL_USESSL: 'true'"
fi

config-add "\
services:
  $MASTER_BASE_SERVICE_NAME:
    environment:
      CMD_DOMAIN: $DOMAIN
      $USESSL
"

@@ -1,6 +1,6 @@
docker-image: docker.0k.io/hackmd:2.2.0 ## from: nabo.codimd.dev/hackmdio/hackmd:2.2.0
docker-image: docker.0k.io/hedgedoc:1.7.2 ## from: quay.io/hedgedoc/hedgedoc:1.7.2-alpine
data-resources:
  - /home/hackmd/app/public/uploads
  - /hedgedoc/public/uploads

default-options:

@@ -0,0 +1,19 @@
FROM alpine:3.9

RUN apk add --no-cache mariadb mariadb-client mariadb-server-utils && \
    rm -f /var/cache/apk/*

## Required by mysql-backup
RUN apk add --no-cache bash gzip && \
    rm -f /var/cache/apk/*

RUN mkdir -p /run/mysqld && \
    chown -R mysql:mysql /run/mysqld

RUN sed -i "s|.*bind-address\s*=.*|bind-address=0.0.0.0|g" /etc/my.cnf.d/mariadb-server.cnf

COPY src/ /

EXPOSE 3306

ENTRYPOINT ["/entrypoint.sh"]
@@ -0,0 +1,20 @@
#!/bin/sh



if ! [ -d /var/lib/mysql/mysql ]; then
    chown -R mysql:mysql /var/lib/mysql
    mysql_install_db --user=mysql --ldata=/var/lib/mysql > /dev/null
fi



## Support of Ctrl-C: see https://github.com/docker-library/mysql/issues/47
run() {
    "$@" &
    pid="$!"
    trap "kill -SIGQUIT $pid" INT TERM
    wait
}

run mysqld --user=mysql --skip-name-resolve --skip-networking=0 "$@"
@@ -0,0 +1,102 @@

set -eux ## important for unbound variable ?

## Require these to be set
# MYSQL_ROOT_PASSWORD=
# MYSQL_CONTAINER=

[ "${MYSQL_ROOT_PASSWORD}" ] || {
    echo "Error: you must set \$MYSQL_ROOT_PASSWORD prior to running this script." >&2
    exit 1
}

[ "${MYSQL_CONTAINER}" ] || {
    echo "Error: you must set \$MYSQL_CONTAINER prior to running this script." >&2
    exit 1
}


##
## Init, to setup passwordless connection to mysql
##

type -p mysql >/dev/null || {
    case $(lsb_release -is) in
        Debian)
            case $(lsb_release -rs) in
                10)
                    apt-get install -y default-mysql-client </dev/null
                    ;;
                *)
                    apt-get install -y mysql-client </dev/null
                    ;;
            esac
            ;;
        Ubuntu)
            apt-get install -y mysql-client </dev/null
            ;;
    esac
}

if ! [ -e "/root/.my.cnf" ]; then
    cat <<EOF > ~/.my.cnf
[client]
password=${MYSQL_ROOT_PASSWORD}
EOF
    chmod 600 ~/.my.cnf
fi

##
## installation of the mysql-backup script
##


apt-get install -y kal-shlib-{core,pretty,common} </dev/null
ln -sf "${PWD}/resources/bin/mysql-backup" /usr/local/sbin/mysql-backup


##
## Connection to cron
##


depends cron
cat <<EOF > /etc/cron.d/mysql-backup
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin

0 * * * * root /usr/local/sbin/mysql-backup --host \$(docker-ip "$MYSQL_CONTAINER" 2>/dev/null | sed -r 's/ +/ /g' | cut -f 3 -d " ") | logger -t mysql-backup

EOF


##
## Connection with backup
##

if type -p mirror-dir >/dev/null 2>&1; then
    [ -d "/etc/mirror-dir" ] || {
        echo "'mirror-dir' is installed but no '/etc/mirror-dir' was found." >&2
        exit 1
    }
    depends shyaml

    if ! sources=$(shyaml get-values default.sources < /etc/mirror-dir/config.yml); then
        echo "Couldn't query 'default.sources' in '/etc/mirror-dir/config.yml'." >&2
        exit 1
    fi

    if ! echo "$sources" | grep "^/var/backups/mysql$" 2>/dev/null; then
        sed -i '/sources:/a\ - /var/backups/mysql' /etc/mirror-dir/config.yml
        cat <<EOF >> /etc/mirror-dir/config.yml
/var/backups/mysql:
  exclude:
    - "/*.inprogress"
EOF
    fi
else
    echo "warn: 'mirror-dir' not installed, backup won't be sent" >&2
fi



@@ -0,0 +1,27 @@
#!/bin/bash

## Init is run on host
## For now it is run every time the script is launched, but
## it should be launched only once after build.

## Accessible variables are:
## - SERVICE_NAME        Name of current service
## - DOCKER_BASE_IMAGE   Base image from which this service might be built if any
## - SERVICE_DATASTORE   Location on host of the DATASTORE of this service
## - SERVICE_CONFIGSTORE Location on host of the CONFIGSTORE of this service

set -e

APP_NAME=monujo
SOURCE_URL="https://docker.0k.io/downloads/$APP_NAME-0.0.1.tar.bz2"
LOCATION="$SERVICE_DATASTORE/opt/apps/$APP_NAME"

mkdir -p "$LOCATION"
if dir_is_empty "$LOCATION"; then
    cd "$LOCATION"
    wget -q "$SOURCE_URL" -O file.tar.bz2
    tar xjf file.tar.bz2
    rm file.tar.bz2
    chown root:root "$LOCATION" -R
fi

@@ -0,0 +1,17 @@
description: "LokWallet"
maintainer: "Valentin Lab <valentin.lab@kalysto.org>"
subordinate: true

uses:
  publish-dir:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    scope: container
    constraint: required
    auto: summon
    solves:
      container: "main running server"
    default-options:
      location: !var-expand "$DATASTORE/$BASE_SERVICE_NAME/opt/apps/monujo"
      # data-dirs:  ## write permission for web-app
      #   - .
@ -1,13 +0,0 @@ |
|||
## This is a cache of nextcloud:18.0.1 image (gmp is included) |
|||
FROM docker.0k.io/nextcloud:1.2.0 |
|||
|
|||
|
|||
## |
|||
## What is following is only to patch nextcloud to remove |
|||
## some database name checks |
|||
## |
|||
|
|||
COPY database-accept-dots.patch /tmp/ |
|||
|
|||
RUN cd /usr/src/nextcloud && \ |
|||
patch -p1 < /tmp/database-accept-dots.patch |
@@ -1,14 +0,0 @@
diff --git a/lib/private/Setup/AbstractDatabase.php b/lib/private/Setup/AbstractDatabase.php
index 0cbfecf..a821a2e 100644
--- a/lib/private/Setup/AbstractDatabase.php
+++ b/lib/private/Setup/AbstractDatabase.php
@@ -72,9 +72,6 @@ abstract class AbstractDatabase {
 		} elseif (empty($config['dbname'])) {
 			$errors[] = $this->trans->t("%s enter the database name.", [$this->dbprettyname]);
 		}
-		if(substr_count($config['dbname'], '.') >= 1) {
-			$errors[] = $this->trans->t("%s you may not use dots in the database name", array($this->dbprettyname));
-		}
 		return $errors;
 	}

@@ -0,0 +1,50 @@
#!/bin/bash

## Load action gets a first argument a DIRECTORY holding the necessary files.
##
##

if [ -z "$SERVICE_DATASTORE" ]; then
    echo "This script is meant to be run through 'compose' to work properly." >&2
    exit 1
fi

usage="$exname [-h|--help] DBNAME [MODULE ...]"

dbname=
modules=()
while [ "$1" ]; do
    case "$1" in
        "--help"|"-h")
            print_usage
            exit 0
            ;;
        *)
            [ -z "$dbname" ] && { dbname=$1 ; shift ; continue ; }
            modules+=("$1")
            ;;
    esac
    shift
done

if [ -z "$dbname" ]; then
    err "You must provide a destination database name as second argument."
    print_usage
    exit 1
fi

if [ -z "${modules[*]}" ]; then
    err "You must provide at least one module as third argument."
    print_usage
    exit 1
fi

modules="$(echo "${modules[@]}" | tr " " ",")"

## This can work only if ~/.my.cnf is correctly created by init.

set -e

launch_docker_compose run "$CONTAINER_NAME" --init="$modules" -d "$dbname" --stop-after-init

info "Installed '$modules' module(s) into database '$dbname'."
@@ -0,0 +1,34 @@
#!/bin/bash

. lib/common

set -e

PASSWORD="$(relation-get password)"
USER="$(relation-get user)"
DBNAME="$(relation-get dbname)"
ADMIN_PASSWORD=$(relation-base-compose-get admin-password 2>/dev/null) || {
    if [ -e "$CONFIG" ]; then
        ADMIN_PASSWORD=$(grep ^admin_passwd "$CONFIG" | sed -r 's/^admin_passwd\s+=\s+(.+)$/\1/g')
    fi
    if [ -z "$ADMIN_PASSWORD" ]; then
        info "Generating odoo admin password"
        ADMIN_PASSWORD=$(gen_password)
    fi
}

database=$(options-get database 2>/dev/null) || true
database="${database:-$DBNAME}"

config-add "\
services:
  $MASTER_BASE_SERVICE_NAME:
    environment:
      DB_TYPE: \"postgres\"
      DB_HOST: \"$MASTER_TARGET_SERVICE_NAME\"
      DB_NAME: \"$DBNAME\"
      DB_PWD: \"$PASSWORD\"
      DB_USER: \"$USER\"
"

info "Configured $SERVICE_NAME code for $TARGET_SERVICE_NAME access."
@@ -1,26 +0,0 @@
diff --git a/dist/server/initializers/checker-before-init.js b/dist/server/initializers/checker-before-init.js
index d8422ee..5eb3678 100644
--- a/dist/server/initializers/checker-before-init.js
+++ b/dist/server/initializers/checker-before-init.js
@@ -16,7 +16,7 @@ function checkMissedConfig() {
     const required = ['listen.port', 'listen.hostname',
         'webserver.https', 'webserver.hostname', 'webserver.port',
         'trust_proxy',
-        'database.hostname', 'database.port', 'database.suffix', 'database.username', 'database.password', 'database.pool.max',
+        'database.hostname', 'database.port', 'database.dbname', 'database.username', 'database.password', 'database.pool.max',
         'smtp.hostname', 'smtp.port', 'smtp.username', 'smtp.password', 'smtp.tls', 'smtp.from_address',
         'email.body.signature', 'email.subject.prefix',
         'storage.avatars', 'storage.videos', 'storage.logs', 'storage.previews', 'storage.thumbnails', 'storage.torrents', 'storage.cache',
diff --git a/dist/server/initializers/config.js b/dist/server/initializers/config.js
index 6aa916f..89d16fe 100644
--- a/dist/server/initializers/config.js
+++ b/dist/server/initializers/config.js
@@ -12,7 +12,7 @@ const CONFIG = {
         HOSTNAME: config.get('listen.hostname')
     },
     DATABASE: {
-        DBNAME: 'peertube' + config.get('database.suffix'),
+        DBNAME: config.get('database.dbname'),
         HOSTNAME: config.get('database.hostname'),
         PORT: config.get('database.port'),
         USERNAME: config.get('database.username'),
@@ -0,0 +1,4 @@
#!/bin/bash

apt-get install ntp -y </dev/null

@@ -0,0 +1,3 @@
#!/bin/bash

apt-get install logrotate -y </dev/null
@@ -1,8 +1,51 @@
#!/bin/bash

apt-get install lxc -y --force-yes </dev/null
apt-get install lxc -y </dev/null

## required to access the created lxc !
if ! [ -e ~/.ssh/id_rsa ]; then
    ssh-keygen -t rsa -N "" -f ~/.ssh/id_rsa -q
fi


## From: https://wiki.debian.org/LXC#Independent_bridge_setup
lxc_net="$(cat /etc/lxc/default.conf |
    grep ^lxc.net.0.type |
    cut -f 2 -d = |
    xargs echo)"
if [ "$lxc_net" == "empty" ]; then
    ## Boggers, we are on default unconfigured networks for lxc
    sed -ri 's/^lxc.net.0.type = empty/lxc.net.0.type = veth\nlxc.net.0.link = lxcbr0\nlxc.net.0.flags = up\nlxc.net.0.hwaddr = 00:16:3e:xx:xx:xx/g' /etc/lxc/default.conf
    [ -e "/etc/default/lxc-net" ] || {
        cat <<EOF > /etc/default/lxc-net
USE_LXC_BRIDGE="true"

# If you change the LXC_BRIDGE to something other than lxcbr0, then
# you will also need to update your /etc/lxc/default.conf as well as the
# configuration (/var/lib/lxc/<container>/config) for any containers
# already created using the default config to reflect the new bridge
# name.
# If you have the dnsmasq daemon installed, you'll also have to update
# /etc/dnsmasq.d/lxc and restart the system wide dnsmasq daemon.
LXC_BRIDGE="lxcbr0"
LXC_ADDR="172.101.0.1"
LXC_NETMASK="255.255.255.0"
LXC_NETWORK="172.101.0.0/24"
LXC_DHCP_RANGE="172.101.0.2,172.101.0.254"
LXC_DHCP_MAX="253"
# Uncomment the next line if you'd like to use a conf-file for the lxcbr0
# dnsmasq. For instance, you can use 'dhcp-host=mail1,172.46.0.100' to have
# container 'mail1' always get ip address 172.46.0.100.
LXC_DHCP_CONFILE=/etc/lxc/dnsmasq.conf

# Uncomment the next line if you want lxcbr0's dnsmasq to resolve the .lxc
# domain. You can then add "server=/lxc/172.46.0.1' (or your actual )
# to /etc/dnsmasq.conf, after which 'container1.lxc' will resolve on your
# host.
#LXC_DOMAIN="lxc"

EOF
    }

    service lxc-net restart
fi
@@ -1,26 +1,32 @@
#!/bin/bash

need_restart=

just_installed=
if ! type -p docker; then
    echo "Installing docker..."
    curl -sSL https://get.docker.io | sh
fi
type -p curl >/dev/null ||
    apt-get install -y curl </dev/null

docker_version=17.06
if ! [[ "$(docker --version)" == "Docker version $docker_version"* ]]; then
    version="$(apt-cache madison docker-ce | cut -f 2 -d \| | grep "$docker_version" | head -n 1 | xargs echo)"
    ## DOWNGRADE to 17.xx because 18.xx do not support registry v1
    apt-get install -y --force-yes docker-ce="$version"
    need_restart=true
    curl -sSL https://get.docker.io | sh || exit 1
    just_installed=1
fi


if ! egrep 'disable-legacy-registry' /lib/systemd/system/docker.service 2>/dev/null; then
    sed -ri 's/^(ExecStart=.*)$/\1 --disable-legacy-registry=false/g' /lib/systemd/system/docker.service
    need_restart=true
fi
if [ -n "$just_installed" ]; then
    need_restart=
    docker_version=17
    if ! [[ "$(docker --version)" == "Docker version $docker_version"* ]]; then
        version="$(apt-cache madison docker-ce |
            cut -f 2 -d \| |
            grep "$docker_version" |
            head -n 1 | xargs echo)"
        ## DOWNGRADE to 17.xx because 18.xx do not support registry v1
        apt-get install -y --allow-downgrades docker-ce="$version"
        need_restart=true
    fi

    if [ "$need_restart" ]; then
        systemctl daemon-reload &&
            service docker restart
    if [ -n "$need_restart" ] && [ -z "$NO_DOCKER_RESTART" ]; then
        systemctl daemon-reload &&
            service docker restart
    fi
fi
@@ -0,0 +1 @@
../../../../rsync-backup/hooks/install.d/60-install.sh
@@ -0,0 +1,23 @@
#!/bin/bash


## Depends lxc-scripts installed


##
## Install
##


apt-get install -y fail2ban </dev/null

sed -ri 's/^(bantime\s+=\s+.*)$/bantime = 1w/g' /etc/fail2ban/jail.conf
sed -ri 's/^(findtime\s+=\s+.*)$/findtime = 26w/g' /etc/fail2ban/jail.conf


##
## Test
##

# fail2ban-client status
# fail2ban-client status sshd
@@ -0,0 +1,60 @@
# -*- ispell-local-dictionary: "english" -*-
#+SETUPFILE: ~/.emacs.d/etc/setup/latex.setup
#+SETUPFILE: ~/.emacs.d/etc/setup/html-readtheorg-local.setup

#+TITLE: Rocket.Chat

* Updating the charm to a new version

We are using the official image, usually the latest tags.

** Test new version

Rocket.Chat has a powerful and working database update mechanism that
will take care of migrating the database on startup.

*** Get latest available versions

You can double-check available candidates for official images like this:

#+begin_src sh
docker-tags-fetch rocketchat/rocket.chat -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV
#+end_src

Check/choose the version you want to test.

*** Modify your own =compose.yml=

Add these 2 lines to your Rocket.Chat service:

#+begin_src yaml
docker-compose:
  image: rocketchat/rocket.chat:X.Y.Z
#+end_src

Replace X.Y.Z with the target version you want to test.

Launch =compose up=.

Be ready to wait a few minutes after =compose up= finishes before the
service becomes available: Rocket.Chat is expected to take some time
to migrate.


** Change the current charm to include new version

To prepare the commit for the next version, you can run the following
on the repository you'll use to push the new commit.

#+begin_src sh
BASENAME=rocketchat/rocket.chat
VERSION=$(docker-tags-fetch "$BASENAME" -l 15 -f "^[0-9]+\.[0-9]+\.[0-9]+$" | sort -rV | head -n 1)
echo Last version of rocket chat: $VERSION
docker pull rocketchat/rocket.chat:"$VERSION" &&
    docker tag rocketchat/rocket.chat:"$VERSION" docker.0k.io/rocketchat:"$VERSION" &&
    docker push docker.0k.io/rocketchat:"$VERSION" &&
    sed -ri "s%^(docker-image: docker.0k.io/rocketchat:).*%\1$VERSION%" metadata.yml &&
    sed -ri "s%^(#docker-image: rocketchat/rocket.chat:).*%\1$VERSION%" metadata.yml
#+end_src

You can review the changes and commit them.
@@ -0,0 +1,125 @@
#+PROPERTY: Effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d
#+PROPERTY: Max_effort_ALL 0 0:30 1:00 2:00 0.5d 1d 1.5d 2d 3d 4d 5d
#+PROPERTY: header-args:python :var filename=(buffer-file-name)
#+PROPERTY: header-args:sh :var filename=(buffer-file-name)
#+TODO: TODO WIP BLOCKED | DONE CANCELED
#+LATEX_HEADER: \usepackage[margin=0.5in]{geometry}
#+LaTeX_HEADER: \hypersetup{linktoc = all, colorlinks = true, urlcolor = DodgerBlue4, citecolor = PaleGreen1, linkcolor = blue}
#+LaTeX_CLASS: article
#+OPTIONS: H:8 ^:nil prop:("Effort" "Max_effort") tags:not-in-toc
#+COLUMNS: %50ITEM %Effort(Min Effort) %Max_effort(Max Effort)

#+TITLE: rsync-backup-target

#+LATEX: \pagebreak

Usage of this service

#+LATEX: \pagebreak

#+LATEX: \pagebreak


* Configuration example

#+begin_src yaml
rsync-backup-target:
  # docker-compose:
  #   ports:
  #     - "10023:22"
  options:
    admin:  ## These keys are for the allowed rsync-backup to write stuff with rsync
      myadmin:
        - "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQDESdz8bWtVcDQJ68IE/KpuZM9tAq\
           ZDXGbvEVnTg16/yWqBGQg0QZdDjISsPn7D3Zr64g2qgD9n7EZghfGP9TkitvfrBYx8p\
           7JkkUyt8nxklwOlKZFD5b3PF2bHloSsmjnP8ZMp5Ar7E+tn1guGrCrTcFIebpVGR3qF\
           hRN9AlWNR+ekWo88ZlLJIrqD26jbWRJZm4nPCgqwhJwfHE3aVwfWGOqjSp4ij+jr2ac\
           Arg7eD4clBPYIqKlqbfNRD5MFAH9sbB6jkebQCAUwNRwV7pKwCEt79HnCMoMjnZh6Ww\
           6TlHIFw936C2ZiTBuofMx7yoAeqpifyzz/T5wsFLYWwSnX rsync@zen"
#+end_src

* ssh API
** Adding new keys for backup

This can be done through the admin accounts configured in =compose.yml=.

You can then use =ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key=:

#+begin_example
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key add "ssh-rsa AAA...Jdhwhv rsync@sourcelabel"
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
..Jdhwhv sourcelabel
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key rm sourcelabel
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key ls
$
#+end_example

** Requesting a recover-only key

*** as an admin

As an admin, by requesting a recover-only key on an ident that you
own, you are allowed to read (and only read) the content of the given
ident. This allows you to give the credentials to any new host so
that it has direct read access, so as to deploy the backup on a new host.

#+begin_example
$ ssh myadmin@$RSYNC_BACKUP_TARGET ssh-key request-recovery-key myident > /tmp/private_key
$ chmod 500 /tmp/private_key
$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \
    -azvArH --delete --delete-excluded \
    --partial --partial-dir .rsync-partial \
    --numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc
#+end_example

This key will expire 15 minutes after the last recovery.

*** as a standard backup account

With a standard backup account, you can log in as the =rsync= user and
request a recovery key without any arguments. Indeed, every standard
backup account is tied to one backup identifier only, so the recovery
key received will be for this backup identifier only.

You'll probably want to use the received key from another computer,
for instance to restore the backup there.

#+begin_example
$ ssh rsync@$RSYNC_BACKUP_TARGET request-recovery-key > /tmp/private_key
$ chmod 500 /tmp/private_key
$ rsync -e "ssh -p 22 -i /tmp/private_key -l rsync" \
    -azvArH --delete --delete-excluded \
    --partial --partial-dir .rsync-partial \
    --numeric-ids $RSYNC_BACKUP_TARGET:/var/mirror/myident/etc/ /tmp/etc
#+end_example


* Troubleshooting

** Faking access from client

This should work:

#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \
    /tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client1
#+end_src

** Direct ssh access should be refused

#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no \
    "$RSYNC_BACKUP_TARGET"
#+end_src

** Wrong directory should be refused

#+begin_src sh
RSYNC_BACKUP_TARGET_IP=172.18.0.2
rsync -azvA -e "ssh -o UserKnownHostsFile=/dev/null -o StrictHostKeyChecking=no" \
    /tmp/toto "$RSYNC_BACKUP_TARGET":/var/mirror/client2
#+end_src
@@ -0,0 +1,7 @@
## allow admin users to request a recovery key, this is really not
## sufficient, but the real check is done on the
## ``ssh-admin-cmd-validate`` side.

%rsync ALL=(root) NOPASSWD: /usr/local/sbin/request-recovery-key *
%rsync ALL=(root) NOPASSWD: /bin/touch /etc/rsync/keys/recover/*
%rsync ALL=(root) NOPASSWD: /usr/local/sbin/ssh-update-keys
@@ -0,0 +1,76 @@
#!/bin/bash

RSYNC_KEY_PATH=/etc/rsync/keys
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover


ANSI_ESC=$'\e['

NORMAL="${ANSI_ESC}0m"

GRAY="${ANSI_ESC}1;30m"
RED="${ANSI_ESC}1;31m"
GREEN="${ANSI_ESC}1;32m"
YELLOW="${ANSI_ESC}1;33m"
BLUE="${ANSI_ESC}1;34m"
PINK="${ANSI_ESC}1;35m"
CYAN="${ANSI_ESC}1;36m"
WHITE="${ANSI_ESC}1;37m"

DARKGRAY="${ANSI_ESC}0;30m"
DARKRED="${ANSI_ESC}0;31m"
DARKGREEN="${ANSI_ESC}0;32m"
DARKYELLOW="${ANSI_ESC}0;33m"
DARKBLUE="${ANSI_ESC}0;34m"
DARKPINK="${ANSI_ESC}0;35m"
DARKCYAN="${ANSI_ESC}0;36m"
DARKWHITE="${ANSI_ESC}0;37m"


ssh:mk-private-key() {
    local comment="$1"
    (
        tmpdir=$(mktemp -d)
        chmod go-rwx "$tmpdir"
        ssh-keygen -t rsa -N "" -f "$tmpdir/rsync_rsa" -C "$service_name@$host" >/dev/null
        cat "$tmpdir/rsync_rsa"
        rm -rf "$tmpdir"
    )
}


md5() {
    local md5
    md5=$(cat | md5sum)
    echo "${md5%% *}"
}


request-recovery-key() {
    local label="$1" ident="$2" key public_key

    ## Admin should have claimed the ident with at least one backup key
    if [ -n "$label" ] && ! [ -e "${RSYNC_KEY_PATH}/backup/$label/$ident.pub" ]; then
        echo "Error: Current admin '$label' has no ident '$ident' claimed." >&2
        return 1
    fi

    ## Find new label
    while true; do
        key=$(ssh:mk-private-key "recover@$ident")
        md5=$(printf "%s" "$key" | md5)
        [ -e "${RECOVER_KEY_PATH}/$md5" ] || break
    done

    mkdir -p "${RECOVER_KEY_PATH}"
    public_key=$(ssh-keygen -y -f <(printf "%s\n" "$key"))
    printf "%s %s\n" "$public_key" "recover@$ident" > "${RECOVER_KEY_PATH}/$md5.pub"
    touch "${RECOVER_KEY_PATH}/$md5"
    chmod go-rwx "${RECOVER_KEY_PATH}/$md5"
    printf "%s\n" "$key" | tee -a "${RECOVER_KEY_PATH}/$md5"

    /usr/local/sbin/ssh-update-keys
}


request-recovery-key "$@"
@@ -0,0 +1,106 @@
#!/bin/bash

## Note that the shebang is not used, but it's the login shell that
## will execute this command.

exname=$(basename "$0")

mkdir -p /var/log/rsync

LOG="/var/log/rsync/$exname.log"


ssh_connection=(${SSH_CONNECTION})
SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}"

log() {
    printf "%s [%s] %s - %s\n" \
        "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \
        >> "$LOG"
}

log "NEW ADMIN CONNECTION"

if [ -z "$1" ] || ! [[ "$1" =~ ^[a-zA-Z0-9._-]+$ ]]; then
    log "INVALID SETUP, ARG IS: '$1'"
    echo "Your command has been rejected. Contact administrator."
    exit 1
fi

label="$1"


reject() {
    log "REJECTED: $SSH_ORIGINAL_COMMAND"
    # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2
    echo "Your command has been rejected and reported to sys admin." >&2
    exit 1
}


if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then
    log "BAD CHARS DETECTED"
    # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2
    reject
fi

if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key add ssh-rsa "[a-zA-Z0-9/+]+" "[a-zA-Z0-9._-]+"@"[a-zA-Z0-9._-]+""$ ]]; then
    log "ACCEPTED: $SSH_ORIGINAL_COMMAND"

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
    exec sudo /usr/local/sbin/ssh-key add "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key ls"$ ]]; then
    log "ACCEPTED: $SSH_ORIGINAL_COMMAND"

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
    exec /usr/local/sbin/ssh-key ls "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key rm "[a-zA-Z0-9._-]+$ ]]; then
    log "ACCEPTED: $SSH_ORIGINAL_COMMAND"

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
    exec sudo /usr/local/sbin/ssh-key rm "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"ssh-key get-type "[a-zA-Z0-9._-]+$ ]]; then
    log "ACCEPTED: $SSH_ORIGINAL_COMMAND"

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
    exec sudo /usr/local/sbin/ssh-key get-type "$label" "${ssh_args[@]:2}"
elif [[ "$SSH_ORIGINAL_COMMAND" =~ ^"request-recovery-key "[a-zA-Z0-9._-]+$ ]]; then
    log "ACCEPTED: $SSH_ORIGINAL_COMMAND"

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    # echo "Would accept: $SSH_ORIGINAL_COMMAND" >&2
    exec sudo /usr/local/sbin/request-recovery-key "$label" "${ssh_args[@]:1}"
else

    log "NOT MATCHING ANY ALLOWED COMMAND"
    reject
fi

## For other commands, like `find` or `md5`, that could be used to
## challenge the backups and check that archive is actually
## functional, I would suggest to write a simple command that takes no
## arguments, so as to prevent allowing wildcards or suspicious
## contents. Letting `find` go through is dangerous for instance
## because of the `-exec`. And path traversal can be done also when
## allowing /my/path/* by using '..'. This is why a fixed purpose
## embedded executable will be much simpler to handle, and to be honest
## we don't need much more.
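The comment above suggests exposing only fixed-purpose, argument-free helpers for backup verification. As an illustration (the script name, its location and the way it would be wired into the validator are assumptions, not part of this repository), such a helper could look like:

#+begin_src sh
#!/bin/bash
## Hypothetical /usr/local/sbin/check-backup: produce a checksum listing
## of the caller's mirror, taking no argument from the remote client.
## "$1" would be injected by the validator (like "$label" above),
## never parsed out of SSH_ORIGINAL_COMMAND.
ident="$1"

[ -d "/var/mirror/$ident" ] || exit 1
find "/var/mirror/$ident" -type f -print0 |
    sort -z |
    xargs -0 md5sum
#+end_src

Because it accepts no remote arguments, there is no wildcard or path-traversal surface to sanitize: the validator only has to match the literal command name.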
@@ -0,0 +1,152 @@
#!/bin/bash

RSYNC_KEY_PATH=/etc/rsync/keys


ANSI_ESC=$'\e['

NORMAL="${ANSI_ESC}0m"

GRAY="${ANSI_ESC}1;30m"
RED="${ANSI_ESC}1;31m"
GREEN="${ANSI_ESC}1;32m"
YELLOW="${ANSI_ESC}1;33m"
BLUE="${ANSI_ESC}1;34m"
PINK="${ANSI_ESC}1;35m"
CYAN="${ANSI_ESC}1;36m"
WHITE="${ANSI_ESC}1;37m"

DARKGRAY="${ANSI_ESC}0;30m"
DARKRED="${ANSI_ESC}0;31m"
DARKGREEN="${ANSI_ESC}0;32m"
DARKYELLOW="${ANSI_ESC}0;33m"
DARKBLUE="${ANSI_ESC}0;34m"
DARKPINK="${ANSI_ESC}0;35m"
DARKCYAN="${ANSI_ESC}0;36m"
DARKWHITE="${ANSI_ESC}0;37m"


ssh-key-ls() {
    local label="$1" f content
    for f in "${RSYNC_KEY_PATH}"/backup/"$label"/*.pub; do
        [ -e "$f" ] || continue
        ident=${f##*/}
        ident=${ident%.pub}
        content=$(cat "$f")
        key=${content#* }
        key=${key% *}
        printf "${DARKGRAY}..${NORMAL}%24s ${DARKCYAN}%s${NORMAL}\n" "${key: -24}" "$ident"
    done
}


ssh-key-rm() {
    local label="$1" ident="$2" delete

    delete="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"
    if ! [ -e "$delete" ]; then
        echo "Error: key '$ident' not found." >&2
        return 1
    fi
    rm "$delete"

    /usr/local/sbin/ssh-update-keys
}


ssh-key-get-type() {
    local label="$1" ident="$2" key content commentary

    key="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"
    if ! [ -e "$key" ]; then
        echo "Error: key '$ident' not found." >&2
        return 1
    fi
    content=$(cat "$key") || return 1
    commentary=${content##* }
    printf "%s\n" "${commentary%%@*}"
}


ssh-key-add() {
    local label="$1" type="$2" key="$3" email="$4"

    [ "$type" == "ssh-rsa" ] || {
        echo "Error: expecting ssh-rsa key type" >&2
        return 1
    }

    ## ident are unique by construction (they are struct keys)
    ## but keys need to be also unique
    declare -A keys
    content="$type $key $email"
    ident="${email##*@}"
    target="${RSYNC_KEY_PATH}/backup/$label/$ident.pub"

    ## is key used already ? As key give access to a specified subdir,
    ## we need to make sure it is unique.

    for key_file in "${RSYNC_KEY_PATH}/backup/"*/*.pub; do
        [ -e "$key_file" ] || continue
        key_content=$(cat "$key_file")
        if [ "$type $key" == "${key_content% *}" ]; then
            if [ "$key_file" == "$target" ]; then
                echo "Provided key already present for '$ident'." >&2
                return 0
            elif [[ "$key_file" == "${RSYNC_KEY_PATH}/"*"/$label/"*.pub ]]; then
                type=${key_file#"${RSYNC_KEY_PATH}/"}
                type=${type%"/$label/"*.pub}
                key_ident=${key_file##*/}
                key_ident=${key_ident%.pub}
                echo "Provided key already used as $type key for '$key_ident'." >&2
                return 1
            else
                olabel=${key_file#"${RSYNC_KEY_PATH}/"*/}
                olabel=${olabel%/*.pub}
                echo "Specified key is already used by '$olabel' account, please pick another one." >&2
                return 1
            fi
        fi
    done

    mkdir -p "${target%/*}"
    if [ -e "$target" ]; then
        echo "Replacing key for '$ident'." >&2
    elif [ -e "${RSYNC_KEY_PATH}/"*"/"*"/$ident.pub" ]; then
        olabel=("${RSYNC_KEY_PATH}/"*"/"*"/$ident.pub")
        olabel="${olabel[0]}"
        olabel=${olabel#"${RSYNC_KEY_PATH}/"*/}
        olabel=${olabel%/*.pub}
        echo "ident '$ident' is already reserved by '$olabel', please pick another one." >&2
        return 1
    fi
    echo "$content" > "$target"

    /usr/local/sbin/ssh-update-keys
}




case "$1" in
    "add")
        shift
        ssh-key-add "$@"
        ;;
    "rm")
        shift
        ssh-key-rm "$@"
        ;;
    "ls")
        shift
        ssh-key-ls "$@"
        ;;
    "get-type")
        shift
        ssh-key-get-type "$@"
        ;;
    *)
        echo "Unknown command '$1'."
        ;;
esac

@@ -0,0 +1,97 @@
#!/bin/bash

## Note that the shebang is not used, but it's the login shell that
## will execute this command.

RSYNC_KEY_PATH=/etc/rsync/keys
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover

exname=$(basename "$0")

mkdir -p /var/log/rsync

LOG="/var/log/rsync/$exname.log"


ssh_connection=(${SSH_CONNECTION})
SSH_SOURCE_IP="${ssh_connection[0]}:${ssh_connection[1]}"

log() {
    printf "%s [%s] %s - %s\n" \
        "$(date --rfc-3339=seconds)" "$$" "$SSH_SOURCE_IP" "$*" \
        >> "$LOG"
}

log "NEW RECOVER CONNECTION"

if [ -z "$1" ] || ! [[ "$1" =~ ^[a-z0-9]+$ ]]; then
    log "INVALID SETUP, ARG 1 SHOULD BE MD5 AND IS: '$1'"
    echo "Your command has been rejected. Contact administrator."
    exit 1
fi

md5="$1"
log "RECOVER KEY $md5"

if [ -z "$2" ] || ! [[ "$2" =~ ^[a-zA-Z0-9._-]+$ ]]; then
    log "INVALID SETUP, IDENT IS: '$2'"
    echo "Your command has been rejected. Contact administrator."
    exit 1
fi

ident="$2"
log "IDENTIFIED AS $ident"

reject() {
    log "REJECTED: $SSH_ORIGINAL_COMMAND"
    # echo "ORIG: $SSH_ORIGINAL_COMMAND" >&2
    echo "Your command has been rejected and reported to sys admin." >&2
    exit 1
}


if [[ "$SSH_ORIGINAL_COMMAND" =~ [\&\(\{\;\<\>\`\$\}] ]]; then
    log "BAD CHARS DETECTED"
    # echo "Bad chars: $SSH_ORIGINAL_COMMAND" >&2
    reject
fi

if [[ "$SSH_ORIGINAL_COMMAND" =~ ^"rsync --server --sender -"[vnldHogDtpArRze\.iLsfxC]+(" --"[a-z=%-]+|" --partial-dir .rsync-partial")*" . /var/mirror/$ident"(|/.*)$ ]]; then

    ## Interpret \ to allow passing spaces (want to avoid possible issue with \n)
    #read -a ssh_args <<< "${SSH_ORIGINAL_COMMAND}"
    ssh_args=(${SSH_ORIGINAL_COMMAND})

    last_arg="${ssh_args[@]: -1:1}"
    if ! new_path=$(realpath "$last_arg" 2>/dev/null); then
        log "FINAL PATH INVALID"
        reject
    fi

    if [[ "$new_path" != "$last_arg" ]] &&
       [[ "$new_path" != "/var/mirror/$ident/"* ]] &&
       [[ "$new_path" != "/var/mirror/$ident" ]]; then
        log "FINAL PATH SUSPICIOUS"
        reject
    fi

    sudo /usr/local/sbin/ssh-update-keys
    if ! [ -e "${RECOVER_KEY_PATH}/$md5" ]; then
        log "RECOVERY KEY $md5 JUST EXPIRED"
        reject
    fi

    log "ACCEPTED RECOVER COMMAND: $SSH_ORIGINAL_COMMAND"
    sudo "${ssh_args[@]}"
    errlvl="$?"

    for key_file in "${RECOVER_KEY_PATH}/$md5"{,.pub}; do
        [ -e "$key_file" ] || continue
        sudo touch "$key_file"  ## Update modified time to keep key longer
    done

    exit "$errlvl"
else
    log "REFUSED COMMAND AS IT DOESN'T MATCH ANY EXPECTED COMMAND"
    reject
fi
@@ -0,0 +1,68 @@
#!/bin/bash

## Keep in mind possible race conditions as this script will be called
## from different place to update the access tokens.


##
## Code
##

RSYNC_KEY_PATH=/etc/rsync/keys
RSYNC_HOME=/var/lib/rsync
BACKUP_KEY_PATH=${RSYNC_KEY_PATH}/backup
RECOVER_KEY_PATH=${RSYNC_KEY_PATH}/recover


mkdir -p "$RSYNC_HOME/.ssh" "$RECOVER_KEY_PATH"

## delete old recovery keys
find "${RECOVER_KEY_PATH}" \
    -maxdepth 1 -not -newermt "-15 minutes" \
    -type f -delete


##
## New
##

pid=$$
new="$RSYNC_HOME"/.ssh/authorized_keys.tmp."$pid"
touch "$new"

for f in "$BACKUP_KEY_PATH"/*/*.pub "$RECOVER_KEY_PATH"/*.pub; do
    [ -e "$f" ] || continue
    content=$(cat "$f")
    if [[ "$content" == *" "*" "*@* ]]; then
        ident="${content##*@}"
    else
        ident="${f##*/}"
        ident="${ident%.pub}"
    fi
    if ! [[ "$ident" =~ ^[a-zA-Z0-9._-]+$ ]]; then
        echo "bad: '$ident'" >&2
        continue
    fi
    if [[ "$f" == "${RECOVER_KEY_PATH}"/*.pub ]]; then
        basename=${f##*/}
        basename=${basename%.pub}
        cmd="/usr/local/sbin/ssh-recover-cmd-validate $basename"
    else
        cmd=/usr/local/sbin/ssh-cmd-validate
    fi
    echo "command=\"$cmd \\\"$ident\\\"\",no-port-forwarding,no-X11-forwarding,no-agent-forwarding,no-pty $content"
done >> "$new"

[ -e "$RSYNC_HOME"/.ssh/authorized_keys ] &&
    mv "$RSYNC_HOME"/.ssh/authorized_keys{,.old}

## XXXvlab: Atomic operation. It's the last call to this instruction
## that will prevail. There are some very special cases where some
## added key would not be added as expected: for instance an older
## call to ``ssh-update-key``, if made before a specific public key
## file was added to directory, could take a longer time to reach this
## next instruction than a more recent call (that would be after
## the specific public key was added).
mv "$new" "$RSYNC_HOME"/.ssh/authorized_keys

chown rsync:rsync "$RSYNC_HOME"/.ssh -R
@@ -0,0 +1,83 @@
#!/bin/bash

## Should be executable N time in a row with same result.

. lib/common

set -e

uid=$(docker_get_uid "$SERVICE_NAME" "rsync")


LOGS=/var/log/rsync
mkdir -p "$SERVICE_DATASTORE/$LOGS"
touch "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"
chown -v "$uid" "$SERVICE_DATASTORE/$LOGS" "$SERVICE_DATASTORE/$LOGS/ssh-cmd-validate.log"

rotated_count=$(relation-get rotated-count 2>/dev/null) || true
rotated_count=${rotated_count:-52}


## XXXvlab: a lot of this intelligence should be moved away into ``logrotate`` charm
DST="$CONFIGSTORE/$TARGET_SERVICE_NAME/etc/logrotate.d/$SERVICE_NAME"
file_put "$DST" <<EOF
/var/log/docker/$SERVICE_NAME/ssh-cmd-validate.log
{
    weekly
    missingok
    dateext
    dateyesterday
    dateformat _%Y-%m-%d
    extension .log
    rotate $rotated_count
    compress
    delaycompress
    notifempty
    create 640 $uid
    sharedscripts
}

/var/log/docker/$SERVICE_NAME/ssh-admin-cmd-validate.log
{
    weekly
    missingok
    dateext
    dateyesterday
    dateformat _%Y-%m-%d
    extension .log
    rotate $rotated_count
    compress
    delaycompress
    notifempty
    create 660 $uid
    sharedscripts
}

/var/log/docker/$SERVICE_NAME/target_*_rsync.log
{
    weekly
    missingok
    dateext
    dateyesterday
    dateformat _%Y-%m-%d
    extension .log
    rotate $rotated_count
    compress
    delaycompress
    notifempty
    create 640
    sharedscripts
}
EOF

config-add "\
services:
  $MASTER_TARGET_SERVICE_NAME:
    volumes:
      - $DST:/etc/logrotate.d/docker-${SERVICE_NAME}:ro
      - $SERVICE_DATASTORE$LOGS:/var/log/docker/$SERVICE_NAME:rw
  $MASTER_BASE_SERVICE_NAME:
    volumes:
      - $SERVICE_DATASTORE$LOGS:$LOGS:rw

"
@@ -1,3 +1,15 @@
description: Backup Rsync over SSH Target
data-resources:
  - /etc/rsync/keys
  - /var/mirror
  - /var/log/rsync

uses:
  log-rotate:
    #constraint: required | recommended | optional
    #auto: pair | summon | none ## default: pair
    constraint: required
    auto: summon
    solves:
      unmanaged-logs: "in docker logs"
    #default-options:
@@ -0,0 +1,12 @@
#!/bin/bash

set -e

URL=$(relation-get url) || exit 1

config-add "\
services:
  $MASTER_BASE_SERVICE_NAME:
    environment:
      BASE_URL: $URL
"
Some files were not shown because too many files changed in this diff