#!/bin/bash

##
## TODO:
## - subordinate containers should really be able to modify the base image of their master
##   - this could be done through docker-update
## - I'm not happy with the current build using the 'build/' directory; this should be
##   changed to:
##   - always have a base image (specified in metadata), and always have hooks/install
##     executed and merged into the image (like docker-build-charm).
##   - the container base image is ALWAYS the image of the master container... this raises
##     questions about having two ways to express inheritance (through relations as it is
##     implemented now, or through this base image ?)
## - the naming of the relation scripts (aka relation_name-relation-joined) is bad: when
##   reading the name in a hooks/ dir, there is no way to know whether we are the target or
##   the base of the relation.
##   - we could leverage a 'relations/' dir at the root of the charm, with both:
##     'relations/provide/relation_name' and 'relations/receive/relation_name'
##   - a very bad point of the current naming is that we can't both provide AND
##     receive a relation with the same name.
## - The cache system should keep md5 of docker-compose and other things between runs
## - The cache system should use underlying functions that take only their arguments as input.
##   This will allow functions to be cached completely without issues over time.
## - would probably need introspection in charm custom actions to know if these need
##   init or relations to be set up.
## - Be clear about when the SERVICE name is used and when the CHARM name is used.
##   - in case of a service contained in another container
##   - in the normal case
##   - in docker-compose, we can't use the charm name: if we want 2 instances of the same charm
##     we are stuck. What will be unique is the name of the service.
## - some relations are configured in compose.yml but should not trigger the loading
##   of the necessary component (for instance, apache --> log-rotate): if log-rotate is
##   not there, this link should be considered optional.
## - Could probably allow a nonexistent charm to be populated with only a "docker-image:"
##   of the same name. Although this should trigger a visible warning.

#:-
[ -e /etc/shlib ] && . /etc/shlib || {
    echo "Unsatisfied dependency. Please install 'kal-shlib-core'."
    exit 1
}
#:-

include common
include pretty
include parse
include charm
include array
include cla
include docker

depends shyaml docker

exname="compose"
version=0.1
usage="$exname [COMPOSE_OPTS] [ACTION [ACTION_OPTS]]"
help="\

$WHITE$exname$NORMAL's job is to run various shell scripts in order to build
and run orchestrated and configured docker containers. These shell
scripts will have the opportunity to build a 'docker-compose.yml'.

Once the init script and the relation scripts are executed, $WHITE$exname$NORMAL
delegates the launching to ${WHITE}docker-compose${NORMAL} by providing it
the final 'docker-compose.yml'.

$WHITE$exname$NORMAL also leverages charms to offer some additional custom
actions per charm, which are simply other scripts that can be
run without launching ${WHITE}docker-compose${NORMAL}.

In compose messages, color coding is enforced as such:
- ${DARKCYAN}action$NORMAL,
- ${DARKBLUE}relation$NORMAL,
- ${DARKPINK}charm${NORMAL},
- ${DARKYELLOW}service${NORMAL},
- ${WHITE}option-name${NORMAL}/${WHITE}command-name${NORMAL}/${WHITE}Section-Title${NORMAL}

$WHITE$exname$NORMAL reads '/etc/compose.conf' for global variables, and
'/etc/compose.local.conf' for local host adjustments.

"

time_now() { date +%s.%3N; }
time_elapsed() { echo "scale=3; $2 - $1" | bc; }

## XXXvlab: this doesn't seem to work when 'compose' is called in
## a hook of a charm.
#[[ "${BASH_SOURCE[0]}" == "" ]] && SOURCED=true
$(return >/dev/null 2>&1) && SOURCED=true

errlvl() { return "${1:-1}"; }
export -f errlvl

if [ "$UID" == 0 ]; then
    CACHEDIR=${CACHEDIR:-/var/cache/compose}
    VARDIR=${VARDIR:-/var/lib/compose}
else
    [ "$XDG_CONFIG_HOME" ] && CACHEDIR=${CACHEDIR:-$XDG_CONFIG_HOME/compose}
    [ "$XDG_DATA_HOME" ] && VARDIR=${VARDIR:-$XDG_DATA_HOME/compose}
    CACHEDIR=${CACHEDIR:-$HOME/.cache/compose}
    VARDIR=${VARDIR:-$HOME/.local/share/compose}
fi
export VARDIR CACHEDIR

export SERVICE_STATE_PATH=${SERVICE_STATE_PATH:-/var/lib/compose/state}


md5_compat() { md5sum | cut -c -32; }
quick_cat_file() { quick_cat_stdin < "$1"; }
quick_cat_stdin() { local IFS=''; while read -r line; do echo "$line"; done ; }
export -f quick_cat_file quick_cat_stdin md5_compat

p-err() {
    "$@"
    echo "$?"
}
export -f p-err


wyq() {
    local exp="$1"
    yq e -e -0 "$exp"
    printf "%s" "$?"
}


wyq-r() {
    local exp="$1"
    yq e -e -0 -r=false "$exp"
    printf "%s" "$?"
}


err-d () {
    local msg="$*"
    shift
    err "$msg"
    print:traceback 1
}
export -f err-d

print:traceback() {
    local omit_level="${1:-0}"
    if [ -z "$DEBUG" ]; then
        echo " Note: traceback available if you provide {--debug|-d} option." >&2
        return 0
    fi
    echo "${WHITE}Traceback (most recent call last):${NORMAL}" >&2
    local i
    for ((i=${#FUNCNAME[@]} - 1; i > "$omit_level"; i--)); do
        local file="${BASH_SOURCE[$i]}"
        local line="${BASH_LINENO[$i - 1]}"
        local func="${FUNCNAME[$i]}"

        if [[ -f "$file" ]]; then
            # Get total number of lines in the file
            local total_lines
            total_lines=$(wc -l < "$file")

            # Calculate start and end lines for context
            local start_line=$((line - 2))
            local end_line=$((line + 2))
            [[ $start_line -lt 1 ]] && start_line=1
            [[ $end_line -gt $total_lines ]] && end_line=$total_lines

            # Extract context lines
            mapfile -s $((start_line - 1)) -n $((end_line - start_line + 1)) context_lines < "$file"

            # Calculate minimal indentation
            local min_indent=9999
            for line_text in "${context_lines[@]}"; do
                if [[ -n "$line_text" ]]; then
                    # Get leading whitespace
                    local leading_whitespace="${line_text%%[![:space:]]*}"
                    local indent=${#leading_whitespace}
                    if [[ $indent -lt $min_indent ]]; then
                        min_indent=$indent
                    fi
                fi
            done

            # Remove minimal indentation from each line
            for idx in "${!context_lines[@]}"; do
                context_lines[$idx]="${context_lines[$idx]:$min_indent}"
            done
        else
            context_lines=("<source unavailable>")
            min_indent=0
            start_line=1
            end_line=1
        fi

        # Print the traceback frame
        echo " File \"$file\", line $line, in ${WHITE}$func${NORMAL}:"

        # Print the context with line numbers
        local current_line=$start_line
        for context_line in "${context_lines[@]}"; do
            context_line="${context_line%$'\n'}"
            if [[ $current_line -eq $line ]]; then
                echo " ${DARKYELLOW}$current_line${NORMAL} ${context_line}"
            else
                echo " ${DARKGRAY}$current_line${NORMAL} ${context_line}"
            fi
            ((current_line++))
        done
    done >&2
}
export -f print:traceback

clean_cache() {
    local i=0
    for f in $(ls -t "$CACHEDIR/"*.cache.* 2>/dev/null | tail -n +500); do
        ((i++))
        rm -f "$f"
    done
    if (( i > 0 )); then
        debug "${WHITE}Cleaned cache:${NORMAL} Removed $((i)) elements (current cache size is $(du -sh "$CACHEDIR" | cut -f 1))"
    fi
}


export DEFAULT_COMPOSE_FILE

##
## Merge YAML files
##

export _merge_yaml_common_code="

import sys
import yaml

try:
    from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
except ImportError: ## pragma: no cover
    sys.stderr.write('YAML code in pure python\n')
    exit(1)
    from yaml import SafeLoader, SafeDumper

class MySafeLoader(SafeLoader): pass
class MySafeDumper(SafeDumper): pass


try:
    # included in standard lib from Python 2.7
    from collections import OrderedDict
except ImportError:
    # try importing the backported drop-in replacement
    # it's available on PyPI
    from ordereddict import OrderedDict


## Ensure that there are no collisions with the legacy OrderedDict
## that could be used for omap for instance.
class MyOrderedDict(OrderedDict):
    pass

MySafeDumper.add_representer(
    MyOrderedDict,
    lambda cls, data: cls.represent_dict(data.items()))


def construct_omap(cls, node):
    cls.flatten_mapping(node)
    return MyOrderedDict(cls.construct_pairs(node))


MySafeLoader.add_constructor(
    yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
    construct_omap)


##
## Support local and global objects
##

class EncapsulatedNode(object): pass


def mk_encapsulated_node(s, node):

    method = 'construct_%s' % (node.id, )
    data = getattr(s, method)(node)

    class _E(data.__class__, EncapsulatedNode):
        pass

    _E.__name__ = str(node.tag)
    _E._node = node
    return _E(data)


def represent_encapsulated_node(s, o):
    value = s.represent_data(o.__class__.__bases__[0](o))
    value.tag = o.__class__.__name__
    return value


MySafeDumper.add_multi_representer(EncapsulatedNode,
                                   represent_encapsulated_node)
MySafeLoader.add_constructor(None, mk_encapsulated_node)


def fc(filename):
    with open(filename) as f:
        return f.read()

def merge(*args):
    # sys.stderr.write('%r\n' % (args, ))
    args = [arg for arg in args if arg is not None]
    if len(args) == 0:
        return None
    if len(args) == 1:
        return args[0]
    if all(isinstance(arg, (int, basestring, bool, float)) for arg in args):
        return args[-1]
    elif all(isinstance(arg, list) for arg in args):
        res = []
        for arg in args:
            for elt in arg:
                if elt in res:
                    res.remove(elt)
                res.append(elt)
        return res
    elif all(isinstance(arg, dict) for arg in args):
        keys = set()
        for arg in args:
            keys |= set(arg.keys())
        dct = {}
        for key in keys:
            sub_args = []
            for arg in args:
                if key in arg:
                    sub_args.append(arg)
            try:
                dct[key] = merge(*(a[key] for a in sub_args))
            except NotImplementedError as e:
                raise NotImplementedError(
                    e.args[0],
                    '%s.%s' % (key, e.args[1]) if e.args[1] else key,
                    e.args[2])
            if dct[key] is None:
                del dct[key]
        return dct
    else:
        raise NotImplementedError(
            'Unsupported types: %s'
            % (', '.join(list(set(arg.__class__.__name__ for arg in args)))), '', args)
    return None

def merge_cli(*args):
    try:
        c = merge(*args)
    except NotImplementedError as e:
        sys.stderr.write('Merging Failed: %s.\n%s\n'
                         ' Values are:\n %s\n'
                         % (e.args[0],
                            ' Conflicting key is %r.' % e.args[1] if e.args[1] else
                            ' Conflict at base of structure.',
                            '\\n '.join('v%d: %r' % (i, a)
                                        for i, a in enumerate(e.args[2]))))
        exit(1)
    if c is not None:
        print '%s' % yaml.dump(c, default_flow_style=False, Dumper=MySafeDumper)

"

merge_yaml() {

    if ! [ -r "$state_tmpdir/merge_yaml.py" ]; then
        cat <<EOF > "$state_tmpdir/merge_yaml.py"

$_merge_yaml_common_code

merge_cli(*(yaml.load(fc(f), Loader=MySafeLoader) for f in sys.argv[1:]))
EOF
    fi

    python "$state_tmpdir/merge_yaml.py" "$@"
}
export -f merge_yaml
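
## Illustrative example (assuming two hypothetical files a.yml and b.yml):
## scalars keep the last value, lists are concatenated with deduplication,
## and mappings are merged recursively.
##
##   $ printf 'foo: 1\ntags:\n- a\n- b\n' > a.yml
##   $ printf 'foo: 2\ntags:\n- b\n- c\n' > b.yml
##   $ merge_yaml a.yml b.yml
##   foo: 2
##   tags:
##   - a
##   - b
##   - c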


merge_yaml_str() {
    local entries="$@"

    if ! [ -r "$state_tmpdir/merge_yaml_str.py" ]; then
        cat <<EOF > "$state_tmpdir/merge_yaml_str.py" || return 1

$_merge_yaml_common_code

merge_cli(*(yaml.load(f, Loader=MySafeLoader) for f in sys.argv[1:]))
EOF
    fi

    if ! python "$state_tmpdir/merge_yaml_str.py" "$@"; then
        err "Failed to merge yaml strings:"
        local s
        for s in "$@"; do
            printf " - \n"
            printf "%s\n" "$s" | prefix " ${GRAY}|$NORMAL "
        done >&2
        return 1
    fi
}
export -f merge_yaml_str


yaml_get_values() {
    local sep=${1:-$'\n'} value input type first elt
    input=$(cat -)
    if [ -z "$input" ] || [[ "$input" =~ ^(None|null)$ ]]; then
        return 0
    fi
    type=$(e "$input" | shyaml get-type)
    value=
    case "$type" in
        "sequence")
            first=1
            while read-0 elt; do
                elt="$(e "$elt" | yaml_get_interpret)" || return 1
                [ "$elt" ] || continue
                if [ "$first" ]; then
                    first=
                else
                    value+="$sep"
                fi
                first=
                value+="$elt"
            done < <(e "$input" | shyaml -y get-values-0)
            ;;
        "struct")
            while read-0 val; do
                value+=$'\n'"$(e "$val" | yaml_get_interpret)" || return 1
            done < <(e "$input" | shyaml -y values-0)
            ;;
        "NoneType")
            value=""
            ;;
        "str"|*)
            value+="$(e "$input" | yaml_get_interpret)"
            ;;
    esac
    e "$value"
}
export -f yaml_get_values


yaml_key_val_str() {
    local entries="$@"

    if ! [ -r "$state_tmpdir/yaml_key_val_str.py" ]; then
        cat <<EOF > "$state_tmpdir/yaml_key_val_str.py"

$_merge_yaml_common_code

print '%s' % yaml.dump(
    {
        yaml.load(sys.argv[1], Loader=MySafeLoader):
        yaml.load(sys.argv[2], Loader=MySafeLoader)
    },
    default_flow_style=False,
    Dumper=MySafeDumper,
)

EOF
    fi

    python "$state_tmpdir/yaml_key_val_str.py" "$@"
}
export -f yaml_key_val_str
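
## Illustrative example: nest a YAML document under a key (hypothetical
## service name 'myservice'):
##
##   $ yaml_key_val_str "myservice" $'labels:\n- foo=bar'
##   myservice:
##     labels:
##     - foo=bar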


##
## Docker
##

docker_has_image() {
    local image="$1"
    images=$(docker images -q "$image" 2>/dev/null) || {
        err "docker images call has failed unexpectedly."
        return 1
    }
    [ -n "$images" ]
}
export -f docker_has_image


docker_image_id() {
    local image="$1"
    image_id=$(docker inspect "$image" --format='{{.Id}}') || return 1
    echo "$image_id"  # | tee "$cache_file"
}
export -f docker_image_id


cached_cmd_on_image() {
    local image="$1" cache_file
    image_id=$(docker_image_id "$image") || return 1
    cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        quick_cat_stdin < "$cache_file"
        return 0
    fi
    shift
    out=$(docker run -i --rm --entrypoint /bin/sh "$image_id" -c "$*") || return 1
    echo "$out" | tee "$cache_file"
}
export -f cached_cmd_on_image


cmd_on_base_image() {
    local service="$1" base_image
    shift
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker run -i --rm --entrypoint /bin/bash "$base_image" -c "$*"
}
export -f cmd_on_base_image


cached_cmd_on_base_image() {
    local service="$1" base_image cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)"
    shift
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        quick_cat_stdin < "$cache_file"
        return 0
    fi
    base_image=$(service_ensure_image_ready "$service") || return 1
    result=$(cached_cmd_on_image "$base_image" "$@") || return 1
    echo "$result" | tee "$cache_file"
}
export -f cached_cmd_on_base_image


docker_update() {
    ## YYY: warning, we are storing important information in the cache, and
    ## the cache can be removed.

    ## We want here to cache the last script run on a given service, whatever that script was
    local service="$1" script="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$1" \
          previous_base_image stored_image_id
    shift
    shift
    ## this will build it if necessary
    base_image=$(service_ensure_image_ready "$service") || return 1

    ## XXXvlab: there are probably ways to avoid rebuilding that each time
    image_id="$(docker_image_id "$base_image")" || return 1

    if [ -e "$cache_file" ]; then
        info "Cache file exists"
        read-0 previous_base_image stored_image_id < <(cat "$cache_file")
        info "previous: $previous_base_image"
        info "stored: $stored_image_id"
    else
        info "No cache file $cache_file"
        previous_base_image=""
    fi
    if [ "$previous_base_image" -a "$stored_image_id" == "$image_id" ]; then
        info "Resetting $base_image to $previous_base_image"
        docker tag "$previous_base_image" "$base_image" || return 1
        image_id="$(docker_image_id "$base_image")" || return 1
    else
        previous_base_image="$image_id"
    fi
    info "Updating base image: $base_image (hash: $image_id)"
    echo "$script" | dupd --debug -u "$base_image" -- "$@" || {
        err "Failed updating base image"
        return 1
    }
    new_image_id="$(docker_image_id "$base_image")"
    [ "$new_image_id" == "$previous_base_image" ] && {
        err "Image was not updated correctly (same id)."
        return 1
    }
    printf "%s\0" "$previous_base_image" "$new_image_id" > "$cache_file"
    info "Wrote cache file $cache_file"
}
export -f docker_update


image_exposed_ports_0() {
    local image="$1"
    docker inspect --format='{{range $p, $conf := .Config.ExposedPorts}}{{$p}}{{"\x00"}}{{end}}' "$image"
}
export -f image_exposed_ports_0
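
## Illustrative example: for a hypothetical image exposing ports 80/tcp and
## 443/tcp, this prints NUL-separated entries:
##
##   $ image_exposed_ports_0 myimage | tr '\0' '\n'
##   80/tcp
##   443/tcp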


## feature not yet included in docker: https://github.com/moby/moby/issues/16079
docker_image_export_dir() {
    local image="$1" src="$2" dst="$3" container_id
    (
        container_id=$(docker create "$image") || exit 1
        trap_add EXIT,ERR "docker rm \"$container_id\" >/dev/null"
        docker cp "$container_id":"$src" "$dst"
    )
}
export -f docker_image_export_dir


service_base_image_export_dir() {
    local service="$1" src="$2" dst="$3" base_image
    shift
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker_image_export_dir "$base_image" "$src" "$dst"
}
export -f service_base_image_export_dir


service_base_image_id() {
    local service="$1" src="$2" dst="$3" base_image
    shift
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker inspect "$base_image" --format="{{ .Id }}"
}
export -f service_base_image_id


##
## Generic
##


fn.exists() {
    declare -F "$1" >/dev/null
}


str_pattern_matches() {
    local str="$1"
    shift
    for pattern in "$@"; do
        eval "[[ \"$str\" == $pattern ]]" && return 0
    done
    return 1
}


str_matches() {
    local str="$1"
    shift
    for pattern in "$@"; do
        [[ "$str" == "$pattern" ]] && return 0
    done
    return 1
}

gen_password() {
    local l=( {a..z} {A..Z} {0..9} ) nl="${#l[@]}" size=${1:-16}
    while ((size--)); do
        echo -n "${l[$((RANDOM * nl / 32768))]}"
    done
    echo
}
export -f gen_password
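
## Illustrative example: prints a random alphanumeric string, 16 characters
## by default (output differs on each call).
##
##   $ gen_password 8
##   aZ3kP0qM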


file_put() {
    local TARGET="$1"
    mkdir -p "$(dirname "$TARGET")" &&
        cat - > "$TARGET"
}
export -f file_put


file_put_0() {
    local TARGET="$1"
    mkdir -p "$(dirname "$TARGET")" &&
        cat > "$TARGET"
}
export -f file_put_0


fetch_file() {
    local src="$1"

    case "$src" in
        *"://"*)
            err "Unsupported target scheme."
            return 1
            ;;
        *)
            ## Try direct
            if ! [ -r "$src" ]; then
                err "File '$src' not found/readable."
                return 1
            fi
            cat "$src" || return 1
            ;;
    esac
}
export -f fetch_file


## Receives on stdin the content to decompress and writes it on stdout.
## The stdout content should be in tar format.
uncompress_file() {
    local filename="$1"

    ## Warning: the content of the file already comes on stdin; the filename
    ## is only there as a hint to pick the correct decompression.
    case "$filename" in
        *".gz")
            gunzip
            ;;
        *".bz2")
            bunzip2
            ;;
        *)
            cat
            ;;
    esac

}
export -f uncompress_file


get_file() {
    local src="$1"

    fetch_file "$src" | uncompress_file "$src"
}
export -f get_file


##
## Common database lib
##

_clean_docker() {
    local _DB_NAME="$1" container_id="$2"
    (
        set +e
        debug "Removing container $_DB_NAME"
        docker stop "$container_id"
        docker rm "$_DB_NAME"
        docker network rm "${_DB_NAME}"
        rm -vf "$state_tmpdir/${_DB_NAME}.state"
    ) >&2
}
export -f _clean_docker


get_service_base_image_dir_uid_gid() {
    local service="$1" dir="$2" uid_gid
    uid_gid=$(cached_cmd_on_base_image "$service" "stat -c '%u %g' '$dir'") || {
        debug "Failed to query '$dir' uid in ${DARKYELLOW}$service${NORMAL} base image."
        return 1
    }
    info "uid and gid from ${DARKYELLOW}$service${NORMAL}:$dir is '$uid_gid'"
    echo "$uid_gid"
}
export -f get_service_base_image_dir_uid_gid


get_service_type() {
    if [ -z "$CHARM_STORE_HASH" ]; then
        err-d "Expected \$CHARM_STORE_HASH to be set."
        return 1
    fi
    local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$CHARM_STORE_HASH"
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    master_target_service="$(get_top_master_service_for_service "$service")" || return 1
    charm=$(get_service_charm "$master_target_service") || return 1
    metadata=$(charm.metadata "$charm") || return 1
    printf "%s" "$metadata" | shyaml get-value type service 2>/dev/null |
        tee "$cache_file"
}
export -f get_service_type


are_files_locked_in_dir() {
    local dir="$1" device hdev ldev
    device=$(stat -c %d "$dir") || {
        err "Can't stat '$dir'."
        return 1
    }
    device=$(printf "%04x" $device)
    hdev=${device:0:2}
    ldev=${device:2:2}
    inodes=$(find "$dir" -printf ':%i:\n')

    found=
    while read -r inode; do
        debug "try inode:$inode"
        if [[ "$inodes" == *":$inode:"* ]]; then
            found=1
            break
        fi
    done < <(cat /proc/locks | grep " $hdev:$ldev:" | sed -r "s/^.*$hdev:$ldev:([0-9]+).*$/\1/g")

    [ "$found" ]
}
export -f are_files_locked_in_dir
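
## Illustrative note: /proc/locks lines look roughly like
##   1: POSIX  ADVISORY  WRITE 1234 fd:01:5261 0 EOF
## where 'fd:01' is the device major:minor in hex and '5261' the inode.
## The function formats the directory's device as 4 hex digits (hdev:ldev),
## extracts the locked inodes on that device, and checks whether any of them
## belongs to a file under "$dir".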


set_db_params() {
    local docker_ip="$1" docker_network="$2"
    if [ -z "$DB_PARAMS_LOADED" ]; then
        DB_PARAMS_LOADED=1
        _set_db_params "$docker_ip" "$docker_network"
    fi
}
export -f set_db_params

export _PID="$$"
ensure_db_docker_running () {
    local _STATE_FILE errlvl project

    _DB_NAME="db_${DB_NAME}_${_PID}"
    _STATE_FILE="$state_tmpdir/${_DB_NAME}.state"
    if [ -e "$_STATE_FILE" ]; then
        IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$(cat "$_STATE_FILE")"

        debug "Re-using previous docker/connection '$DOCKER_IP'."
        set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"

        return 0
    fi

    if [ -e "$state_tmpdir/${_DB_NAME}.working" ]; then
        ## avoid recursive calls.
        if [ -z "$DOCKER_IP" ]; then
            err "Currently figuring out DOCKER_IP, please set it yourself before this call if needed."
            return 1
        else
            debug "ignoring recursive call of 'ensure_db_docker_running'."
        fi
        return 0
    fi

    touch "$state_tmpdir/${_DB_NAME}.working"

    docker rm "$_DB_NAME" 2>/dev/null || true

    host_db_working_dir="$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR"

    if is_db_locked; then
        info "Some process is using '$host_db_working_dir'. Trying to find a docker container that would be doing this..."
        found=
        for docker_id in $(docker ps -q); do
            has_volume_mounted=$(
                docker inspect \
                    --format "{{range .Mounts}}{{if eq .Destination \"$DB_DATADIR\"}}{{.Source}}{{end}}{{end}}" \
                    "$docker_id")
            if [ "$has_volume_mounted" == "$host_db_working_dir" ]; then
                info "docker '$docker_id' uses '$has_volume_mounted'."
                project=$(docker inspect "$docker_id" \
                              --format "{{index .Config.Labels \"compose.project\" }}") || continue
                info "docker '$docker_id' is from project '$project' (current project is '$PROJECT_NAME')."
                [ "$project" == "$PROJECT_NAME" ] || continue
                found="$docker_id"
                break
            fi
        done
        if [ -z "$found" ]; then
            err "Please shut down any other docker container using this directory."
            return 1
        fi
        export container_id="$found"
        info "Found docker $docker_id already running."
    else
        verb "Database is not locked."
        if ! docker_has_image "$DOCKER_BASE_IMAGE"; then
            err "Unexpected missing docker image $DOCKER_BASE_IMAGE."
            return 1
        fi

        _set_server_db_params || return 1
        debug docker network create "$_DB_NAME"
        if ! network_id=$(docker network create "$_DB_NAME"); then
            err "'docker network create $_DB_NAME' failed !"
            _clean_docker "$_DB_NAME" "$container_id"
            rm "$state_tmpdir/${_DB_NAME}.working"
            return 1
        fi
        debug docker run -d \
              --name "$_DB_NAME" \
              "${server_docker_opts[@]}" \
              --network "$_DB_NAME" \
              -v "$host_db_working_dir:$DB_DATADIR" \
              "$DOCKER_BASE_IMAGE"
        if ! container_id=$(
                docker run -d \
                       --name "$_DB_NAME" \
                       "${server_docker_opts[@]}" \
                       --network "$_DB_NAME" \
                       -v "$host_db_working_dir:$DB_DATADIR" \
                       "$DOCKER_BASE_IMAGE"
             ); then
            err "'docker run' failed !"
            _clean_docker "$_DB_NAME" "$container_id"
            rm "$state_tmpdir/${_DB_NAME}.working"
            return 1
        fi
        trap_add EXIT,ERR "_clean_docker \"$_DB_NAME\" \"$container_id\""
    fi

    if docker_ip=$(wait_for_docker_ip "$container_id"); then
        IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$docker_ip"
        echo "$docker_ip" > "$_STATE_FILE"
        debug "written '$_STATE_FILE'"
        rm "$state_tmpdir/${_DB_NAME}.working"
        set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"
        return 0
    else
        errlvl="$?"
        err "Db not found (errlvl: $errlvl). Tail of docker logs follows:"
        docker logs --tail=5 "$container_id" 2>&1 | prefix " | " >&2
        rm "$state_tmpdir/${_DB_NAME}.working"
        return "$errlvl"
    fi
}
export -f ensure_db_docker_running


## Requires $db_docker_opts to be set if needed, and $DB_PASSFILE
##
_dcmd() {
    local docker_opts command="$1"
    shift

    debug "Db> $command $@"

    if [ "$HOST_DB_PASSFILE" -a -f "$LOCAL_DB_PASSFILE" -a "$CLIENT_DB_PASSFILE" ]; then
        verb "Found and using '$HOST_DB_PASSFILE' as '$CLIENT_DB_PASSFILE'."
        docker_opts=("${db_docker_opts[@]}" "-v" "$HOST_DB_PASSFILE:$CLIENT_DB_PASSFILE")
    else
        docker_opts=("${db_docker_opts[@]}")
    fi

    ## XXXX was here: actually, we only need the connection between this version and the client version
    debug docker run -i --rm \
          "${docker_opts[@]}" \
          --entrypoint "$command" "$DOCKER_BASE_IMAGE" "${db_cmd_opts[@]}" "$@"
    docker run -i --rm \
           "${docker_opts[@]}" \
           --entrypoint "$command" "$DOCKER_BASE_IMAGE" "${db_cmd_opts[@]}" "$@"
}
export -f _dcmd


## Executes code through db
dcmd() {
    local fun
    [ "$DB_NAME" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_NAME."
    [ "$DB_DATADIR" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_DATADIR."
    # [ "$DB_PASSFILE" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_PASSFILE."
    [ "$_PID" ] || print_syntax_error "$FUNCNAME: You must provide \$_PID."
    for fun in is_db_locked _set_db_params ddb; do
        [ "$(type -t "$fun")" == "function" ] ||
            print_syntax_error "$FUNCNAME: You must provide function '$fun'."
    done
    ensure_db_docker_running </dev/null || return 1

    _dcmd "$@"
}
export -f dcmd


get_docker_ips() {
    local name="$1" ip format network_id
    if ! docker inspect --format='{{ .NetworkSettings.Networks }}' "$name" >/dev/null 2>&1; then
        echo "default:$(docker inspect --format='{{ .NetworkSettings.IPAddress }}' "$name" 2>/dev/null)"
    else
        format='{{range $name, $conf := .NetworkSettings.Networks}}{{$name}}{{"\x00"}}{{$conf.IPAddress}}{{"\x00"}}{{end}}'
        while read-0 network_id ip; do
            printf "%s:%s\n" "$network_id" "$ip"
        done < <(docker inspect --format="$format" "$name")
    fi
}
export -f get_docker_ips
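
## Illustrative example: prints one 'network:ip' pair per line, e.g. for a
## hypothetical container attached to two networks:
##
##   $ get_docker_ips mycontainer
##   front:172.18.0.2
##   back:172.19.0.3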


get_docker_ip() {
    local name="$1"
    get_docker_ips "$name"
}
export -f get_docker_ip


wait_docker_ip() {
    local name="$1" timeout="${2:-15}" timeout_count=0 docker_ip=
    start=$SECONDS
    while [ -z "$docker_ip" ]; do
        sleep 0.5
        docker_ip=$(get_docker_ip "$name") && break
        elapsed=$((SECONDS - start))
        if ((elapsed > timeout)); then
            err "${RED}timeout error${NORMAL}(${timeout}s):" \
                "Could not find '$name' docker container's IP."
            return 1
        fi
        [ "$elapsed" == "$old_elapsed" ] ||
            verb "Waiting for docker $name... ($elapsed/$timeout)"
        old_elapsed="$elapsed"
    done
    verb "Found docker $name network and IP: $docker_ip"
    echo "$docker_ip"
}
export -f wait_docker_ip


wait_for_tcp_port() {
    local network=$1 host_port=$2 timeout=${3:-60}
    verb "Trying to connect to $host_port"
    bash_image=${DEFAULT_BASH_IMAGE:-docker.0k.io/bash}
    #echo docker run --rm -i --network "$network" "$bash_image" >&2
    docker run --rm -i --network "$network" "$bash_image" <<EOF
start=\$SECONDS
while true; do
    timeout 1 bash -c "</dev/tcp/${host_port/://}" >/dev/null 2>&1 && break
    sleep 0.2
    if [ "\$((SECONDS - start))" -gt "$timeout" ]; then
        exit 1
    fi
done
exit 0
EOF
    if [ "$?" != 0 ]; then
        err "${RED}timeout error${NORMAL}(${timeout}s):"\
            "Could not connect to $host_port."
        return 1
    fi
    return 0
}
export -f wait_for_tcp_port


## Warning: requires a ``ddb`` matching the current database to be checked
wait_for_docker_ip() {
    local name=$1 DOCKER_IP= DOCKER_NETWORK= docker_ips= docker_ip= elapsed timeout=10
    docker_ip=$(wait_docker_ip "$name" 5) || return 1
    IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$docker_ip"
    if ! str_is_ipv4 "$DOCKER_IP"; then
        err "internal 'wait_docker_ip' did not return a valid IP. Returned IP is '$DOCKER_IP'."
        return 1
    fi
    set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"
    while read-0 port; do
        IFS="/" read port type <<<"$port"
        [ "$type" == "tcp" ] || continue
        wait_for_tcp_port "$DOCKER_NETWORK" "$DOCKER_IP:${port}" || return 17
        info "Host/Port $DOCKER_IP:${port} checked ${GREEN}open${NORMAL}."
        ## XXXvlab: what to do with more than one port ?
        break
    done < <(image_exposed_ports_0 "$container_id")

    ## Checking direct connection
    timeout=120
    start=$SECONDS
    while true; do
        if err=$(echo "$check_command" | ddb 2>&1 >/dev/null); then
            break
        fi
        if ! [[ "$err" == *"the database system is starting up" ]]; then
            err "${RED}db connection error${NORMAL}:" \
                "Could not connect to db on $DOCKER_IP container's IP."
            echo " Note: IP is up, TCP port(s) are open" >&2
            if [ "$err" ]; then
                echo " Error:" >&2
                printf "%s\n" "$err" | prefix " ${RED}!${NORMAL} " >&2
            fi
            return 18
        fi
        debug "Got 'database system is starting up' error."
        elapsed=$((SECONDS - start))
        if ((elapsed > timeout)); then
            err "${RED}db connection error${NORMAL}:"\
                "Could not connect to db on $DOCKER_IP" \
                "container's IP. (IP is up, TCP port(s) are open, no sql answer after ${timeout}s)"
            return 1
        fi
        sleep 0.2
    done
    echo "${DOCKER_NETWORK}:${DOCKER_IP}"
    return 0
}
export -f wait_for_docker_ip


docker_add_host_declaration() {
    local src_docker=$1 domain=$2 dst_docker=$3 dst_docker_ip= dst_docker_network
    dst_docker_ip=$(wait_docker_ip "$dst_docker") || exit 1

    ## wait_docker_ip outputs 'network:ip'
    IFS=: read dst_docker_network dst_docker_ip <<<"$dst_docker_ip"

    docker exec -i "$src_docker" bash <<EOF
if cat /etc/hosts | grep -E "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+$domain\$" > /dev/null 2>&1; then
    sed -ri "s/^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+$domain\$/$dst_docker_ip $domain/g" /etc/hosts
else
    echo "$dst_docker_ip $domain" >> /etc/hosts
fi
EOF
}
export -f docker_add_host_declaration


get_running_containers_for_service() {
    local service="$1" project="$2"

    project=${project:-$PROJECT_NAME}

    [ -n "$project" ] || {
        err "No project name was defined yet."
        return 1
    }

    docker ps \
        --filter label="compose.project=$project" \
        --filter label="compose.master-service=$service" \
        --format="{{.ID}}"
}
export -f get_running_containers_for_service


get_container_network_ips() {
    local container="$1"
    docker inspect "$container" \
        --format='{{range $key, $val :=.NetworkSettings.Networks}}{{$key}}{{"\x00"}}{{$val.IPAddress}}{{"\x00"}}{{end}}'
}
export -f get_container_network_ips
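
## Illustrative example: same information as 'get_docker_ips' but
## NUL-separated, suitable for 'read-0' (hypothetical container with
## two networks):
##
##   $ get_container_network_ips mycontainer | tr '\0' '\n'
##   front
##   172.18.0.2
##   back
##   172.19.0.3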


get_container_network_ip() {
    local container="$1"
    while read-0 network ip; do
        printf "%s\0" "$network" "$ip"
        break
    done < <(get_container_network_ips "$container")
}
export -f get_container_network_ip


##
## Internal Process
##


get_docker_compose_links() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          deps master_service master_target_service _relation_name \
          target_service _relation_config tech_dep
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    master_service=$(get_top_master_service_for_service "$service") || return 1
    deps=()
    while read-0 _relation_name target_service _relation_config tech_dep; do
        master_target_service="$(get_top_master_service_for_service "$target_service")" || return 1
        [ "$master_service" == "$master_target_service" ] && continue
        type="$(get_service_type "$target_service")" || return 1
        [ "$type" == "stub" ] && continue
        [ "$type" == "run-once" ] && continue
        if [ "$tech_dep" == "reversed" ]; then
            deps+=("$(echo -en "$master_target_service:\n links:\n - $master_service")")
        elif [[ "$tech_dep" =~ ^(True|true)$ ]]; then
            deps+=("$(echo -en "$master_service:\n links:\n - $master_target_service")")
        fi
        ## XXXvlab: an attempt to add depends_on, but this doesn't work well actually
        ## as there's a circular dependency issue. We don't really want the full feature
        ## of depends_on, but just to add it as targets when doing an 'up'
        # deps+=("$(echo -en "$master_service:\n depends_on:\n - $master_target_service")")
    done < <(get_service_relations "$service")
    merge_yaml_str "${deps[@]}" | tee "$cache_file" || return 1
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        rm "$cache_file"
        err "Failed to merge YAML from all ${WHITE}links${NORMAL} dependencies."
        return 1
    fi
}


_get_docker_compose_opts() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          compose_def master_service docker_compose_opts
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    compose_def="$(get_compose_service_def "$service")" || return 1
    master_service="$(get_top_master_service_for_service "$service")"

    if docker_compose_opts=$(echo "$compose_def" | shyaml get-value -y "docker-compose" 2>/dev/null); then
        yaml_key_val_str "$master_service" "$docker_compose_opts"
    fi | tee "$cache_file"
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        rm "$cache_file"
        return 1
    fi
}


##
## By reading the metadata.yml, we create a docker-compose.yml mixin.
## Some metadata.yml (of subordinates) will indeed modify services
## other than their own.
_get_docker_compose_service_mixin() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          links_yaml base_mixin docker_compose_options \
          charm charm_part
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    type=$(get_service_type "$service") || return 1
    [ "$type" == "stub" ] && return 0

    master_service=$(get_top_master_service_for_service "$service") || {
        err "Failed to get top master service for service $DARKYELLOW$service$NORMAL"
        return 1
    }

    ## The compose part

    base_mixin="$master_service:
  labels:
    - compose.service=$service
    - compose.master-service=${master_service}
    - compose.project=$(get_default_project_name)"
    links_yaml=$(get_docker_compose_links "$service") || return 1
    docker_compose_options=$(_get_docker_compose_opts "$service") || return 1

    ## the charm part

    charm_part=$(get_docker_compose_mixin_from_metadata "$service") || return 1

    ## Merge results
    if [ "$charm_part" ]; then
        charm_yaml="$(yaml_key_val_str "$master_service" "$charm_part")" || return 1
        merge_yaml_str "$base_mixin" "$links_yaml" "$charm_yaml" "$docker_compose_options" || return 1
    else
        merge_yaml_str "$base_mixin" "$links_yaml" "$docker_compose_options" || return 1
    fi | tee "$cache_file"
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        err "Failed to constitute the base YAML for service '${DARKYELLOW}$service${NORMAL}'"
        rm "$cache_file"
        return 1
    fi

}
export -f _get_docker_compose_service_mixin


##
## Get full `docker-compose.yml` format for all listed services (and
## their deps)
##

## @export
## @cache: !system !nofail +stdout
get_docker_compose () {
    if [ -z "$CHARM_STORE_HASH" ]; then
        err-d "Expected \$CHARM_STORE_HASH to be set."
        return 1
    fi
    if [ -z "$COMPOSE_YML_CONTENT_HASH" ]; then
        err-d "Expected \$COMPOSE_YML_CONTENT_HASH to be set."
        return 1
    fi
    local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@" "$CHARM_STORE_HASH" "$COMPOSE_YML_CONTENT_HASH")" \
          entries services service start docker_compose_services
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*) $cache_file"
        touch "$cache_file" || return 1
        cp "$cache_file"{,.wip} || return 1
        export _CURRENT_DOCKER_COMPOSE="$cache_file.wip"
        cat "$cache_file" || return 1
        return 0
    fi

    ##
    ## Adding sub services configurations
    ##

    declare -A entries
    start_compilation=$SECONDS
    debug "Compiling 'docker-compose.yml' base for ${DARKYELLOW}$*$NORMAL..."
    for target_service in "$@"; do
        start=$SECONDS
        services=($(get_ordered_service_dependencies "$target_service")) || {
            err "Failed to get dependencies for $DARKYELLOW$target_service$NORMAL"
            return 1
        }

        if [ "$DEBUG" ]; then
            debug " $DARKYELLOW$target_service$NORMAL deps:$DARKYELLOW" \
                  "${services[@]::$((${#services[@]} - 1))}" \
                  "$NORMAL$GRAY(in $((SECONDS - start))s)$NORMAL"
        fi
        for service in "${services[@]}"; do

            if [ "${entries[$service]}" ]; then
                ## Prevent double inclusion of the same service if this
                ## service is a dep of two or more of your
                ## requirements.
                continue
            fi

            ## mark the service as "loaded" as well as its containers
            ## if this is a subordinate service
            start_service=$SECONDS
            entries[$service]=$(_get_docker_compose_service_mixin "$service") || {
                err "Failed to get service mixin for $DARKYELLOW$service$NORMAL"
                return 1
            }
            debug " Applied $DARKYELLOW$service$NORMAL charm metadata mixins $GRAY(in $((SECONDS - start_service))s)$NORMAL"
        done
        debug " ..finished all mixins for $DARKYELLOW$target_service$NORMAL $GRAY(in $((SECONDS - start))s)$NORMAL"
    done

    docker_compose_services=$(merge_yaml_str "${entries[@]}") || {
        err "Failed to merge YAML services entries together."
        return 1
    }

    base_v2="version: '2.1'"
    merge_yaml_str "$(yaml_key_val_str "services" "$docker_compose_services")" \
                   "$base_v2" > "$cache_file" || return 1

    cp "$cache_file"{,.wip} || return 1
    export _CURRENT_DOCKER_COMPOSE="$cache_file.wip"
    cat "$_CURRENT_DOCKER_COMPOSE" || return 1
    debug " ..compilation of base 'docker-compose.yml' done $GRAY(in $((SECONDS - start_compilation))s)$NORMAL" || true
    # debug " ** ${WHITE}docker-compose.yml${NORMAL}:"
    # debug "$_current_docker_compose"
}
export -f get_docker_compose


_get_compose_service_def_cached () {
    local service="$1" docker_compose="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: STATIC cache hit"
        cat "$cache_file" &&
            touch "$cache_file" || return 1
        return 0
    fi
    value=$(echo "$docker_compose" | shyaml get-value "${service//./\\.}" 2>/dev/null)
    [ "$value" == None ] && value=""
    if ! echo "$value" | shyaml get-value "charm" >/dev/null 2>&1; then
        if charm.exists "$service"; then
            value=$(merge_yaml <(echo "charm: $service") <(echo "$value")) || {
                err "Can't merge YAML inferred 'charm: $service' with base ${DARKYELLOW}$service${NORMAL} YAML definition."
                return 1
            }
        else
            err "No ${WHITE}charm${NORMAL} value for service $DARKYELLOW$service$NORMAL" \
                "in compose, nor same name charm found."
            return 1
        fi
    fi
    echo "$value" | tee "$cache_file" || return 1
    # if [ "${PIPESTATUS[0]}" != 0 ]; then
    #     rm "$cache_file"
    #     return 1
    # fi
    return 0
    # if [ "${PIPESTATUS[0]}" != 0 -o \! -s "$cache_file" ]; then
    #     rm "$cache_file"
    #     err "NOT OK $service: $value"
    #     return 1
    # fi
}
export -f _get_compose_service_def_cached


## XXXvlab: a lot to be done to cache the results
get_compose_service_def () {
    if [ -z "$COMBINED_HASH" ]; then
        err-d "Expected \$COMBINED_HASH to be set."
        return 1
    fi
    local service="$1" docker_compose cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$COMBINED_HASH" \
          result
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: SESSION cache hit"
        cat "$cache_file" || return 1
        return 0
    fi

    [ -z "$service" ] && print_syntax_error "Missing service as first argument."
    docker_compose=$(get_compose_yml_content) || return 1
    result=$(_get_compose_service_def_cached "$service" "$docker_compose") || return 1
    charm=$(e "$result" | shyaml get-value charm 2>/dev/null) || return 1
    metadata=$(charm.metadata "$charm") || return 1
    if default_options=$(printf "%s" "$metadata" | shyaml -y -q get-value default-options); then
        default_options=$(yaml_key_val_str "options" "$default_options") || return 1
        result=$(merge_yaml_str "$default_options" "$result") || return 1
    fi
    echo "$result" | tee "$cache_file" || return 1
}
export -f get_compose_service_def


_get_service_charm_cached () {
    local service="$1" service_def="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit $1"
        cat "$cache_file" &&
            touch "$cache_file" || return 1
        return 0
    fi
    charm=$(echo "$service_def" | shyaml get-value charm 2>/dev/null)
    if [ -z "$charm" ]; then
        err "Missing ${WHITE}charm${NORMAL} value in service $DARKYELLOW$service$NORMAL definition."
        return 1
    fi
    echo "$charm" | tee "$cache_file" || return 1
}
export -f _get_service_charm_cached


get_service_charm () {
    local service="$1"
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi
    service_def=$(get_compose_service_def "$service") || return 1
    _get_service_charm_cached "$service" "$service_def"
}
export -f get_service_charm


## Built above the docker-compose abstraction, so it relies on the
## full docker-compose.yml being already built.
get_service_def () {
    local service="$1" def
    if [ -z "$_CURRENT_DOCKER_COMPOSE" ]; then
        err "${FUNCNAME[0]} is meant to be called after"\
            "\$_CURRENT_DOCKER_COMPOSE has been calculated."
        echo " Called by:" >&2
        printf " - %s\n" "${FUNCNAME[@]:1}" >&2
        return 1
    fi

    def=$(cat "$_CURRENT_DOCKER_COMPOSE" | shyaml get-value "services.${service//./\\.}" 2>/dev/null)
    if [ -z "$def" ]; then
        err "No definition for service $DARKYELLOW$service$NORMAL in compiled 'docker-compose.yml'."
        return 1
    fi
    echo "$def"
}
export -f get_service_def

get_build_hash() {
    local dir="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$1")" hash
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    ## Check that there's a Dockerfile in this directory
    if [ ! -e "$dir/Dockerfile" ]; then
        err "No 'Dockerfile' found in '$dir'."
        return 1
    fi
    ## use find to md5sum all files in the directory and make a final hash
    hash=$(set -o pipefail; cd "$dir"; env -i find "." -type f -exec md5sum {} \; |
               sort | md5sum | awk '{print $1}') || {
        err "Failed to get hash for '$dir'."
        return 1
    }
    printf "%s" "$hash" | tee "$cache_file"
    return $?
}
export -f get_build_hash
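
## Illustrative note: the hash is equivalent to running, in the build
## directory of a hypothetical charm:
##
##   $ cd "$dir" && find . -type f -exec md5sum {} \; | sort | md5sum
##
## so it changes whenever any file content, file name, or the file set
## changes.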

### Query/Get cached image from registry
##
## Returns on stdout the name of the image if found, or an empty string if not
cache:image:registry:get() {
    local charm="$1" hash="$2" service="$3"
    local charm_image_name="cache/charm/$charm"
    local charm_image="$charm_image_name:$hash"

    Elt "pulling ${DARKPINK}$charm${NORMAL} image from $COMPOSE_DOCKER_REGISTRY" >&2
    if out=$(docker pull "$COMPOSE_DOCKER_REGISTRY/$charm_image" 2>&1); then
        docker tag "$COMPOSE_DOCKER_REGISTRY/$charm_image" "$charm_image" || {
            err "Failed to set image '$COMPOSE_DOCKER_REGISTRY/$charm_image' as '$charm_image'" \
                "for ${DARKYELLOW}$service${NORMAL}."
            return 1
        }
        print_info "found" >&2
        print_status success >&2
        Feed >&2
        printf "%s" "$charm_image" | tee "$cache_file"
        return $?
    fi
    if [[ "$out" != *"manifest unknown"* ]] && [[ "$out" != *"not found"* ]]; then
        print_status failure >&2
        Feed >&2
        err "Failed to pull image '$COMPOSE_DOCKER_REGISTRY/$charm_image'" \
            "for ${DARKYELLOW}$service${NORMAL}:"
        e "$out"$'\n' | prefix " ${GRAY}|${NORMAL} " >&2
        return 1
    fi
    print_info "not found" >&2
    if test "$type_method" = "long"; then
        __status="[${NOOP}ABSENT${NORMAL}]"
    else
        echo -n "${NOOP}"
        shift; shift;
        echo -n "$*${NORMAL}"
    fi >&2
    Feed >&2
}
export -f cache:image:registry:get

### Store cached image on registry
##
## Returns nothing
cache:image:registry:put() {
    if [ -n "$COMPOSE_DOCKER_REGISTRY" ] && [ -n "$COMPOSE_PUSH_TO_REGISTRY" ]; then
        local charm="$1" hash="$2" service="$3"
        local charm_image_name="cache/charm/$charm"
        local charm_image="$charm_image_name:$hash"

        Wrap -d "pushing ${DARKPINK}$charm${NORMAL} image to $COMPOSE_DOCKER_REGISTRY" <<EOF || return 1
docker tag "$charm_image" "$COMPOSE_DOCKER_REGISTRY/$charm_image" &&
docker push "$COMPOSE_DOCKER_REGISTRY/$charm_image"
EOF
    fi >&2
}
export -f cache:image:registry:put


### Produce docker cached charm image 'cache/charm/$charm:$hash'
##
## Either by fetching it from a registry or by building it from a
## Dockerfile.
cache:image:produce() {
    local type="$1" src="$2" charm="$3" hash="$4" service="$5"
    local charm_image_name="cache/charm/$charm"
    local charm_image="$charm_image_name:$hash"

    case "$type" in
        fetch)
            local specified_image="$src"
            ## will not pull the upstream image if already present locally
            if ! docker_has_image "${specified_image}"; then
                if ! out=$(docker pull "${specified_image}" 2>&1); then
                    err "Failed to pull image '$specified_image' for ${DARKYELLOW}$service${NORMAL}:"
                    echo "$out" | prefix " | " >&2
                    return 1
                fi
            fi

            # specified_image_id=$(docker_image_id "$specified_image") || return 1
            # charm_image_id=
            # if docker_has_image "${image_dst}"; then
            #     charm_image_id=$(docker_image_id "${image_dst}") || return 1
            # fi
            # if [ "$specified_image_id" != "$charm_image_id" ]; then
            docker tag "$specified_image" "${charm_image}" || return 1
            # fi
            ;;
        build)
            local service_build="$src"
            build_opts=()
            if [ "$COMPOSE_ACTION" == "build" ]; then
                while read-0 arg; do
                    case "$arg" in
                        -t|--tag)
                            ## XXXvlab: doesn't seem to actually be a valid option
                            if [ -n "$COMPOSE_PUSH_TO_REGISTRY" ]; then
                                err "You can't use the -t|--tag option when pushing to a registry."
                                exit 1
                            fi
                            has_named_image=true
                            read-0 val  ## should always be okay because already checked
                            build_opts+=("$arg" "$val")
                            ;;
                        --help|-h)
                            docker-compose "$action" --help |
                                filter_docker_compose_help_message >&2
                            exit 0
                            ;;
                        --*|-*)
                            if str_pattern_matches "$arg" $DC_MATCH_MULTI; then
                                read-0 value
                                build_opts+=("$arg" "$value")
                                shift
                            elif str_pattern_matches "$arg" $DC_MATCH_SINGLE; then
                                build_opts+=("$arg")
                            else
                                err "Unexpected error while parsing the build arguments a second time."
                            fi
                            ;;
                        *)
                            ## Already parsed
                            build_opts+=("$arg")
                            ;;
                    esac
                done < <(cla.normalize "${action_opts[@]}")
            fi
            if [ -z "$has_named_image" ]; then
                build_opts+=(-t "${charm_image}")
            fi

            Wrap -v -d "Building ${DARKPINK}$charm${NORMAL}:$hash image" -- \
                 docker build "$service_build" -t "${charm_image}" "${build_opts[@]}" >&2 || {
                err "Failed to build image '${charm_image}' for ${DARKYELLOW}$service${NORMAL}."
                return 1
            }
            if [ -n "$has_named_image" ]; then
                exit 0
            fi
            ;;
        *)
            err "Unknown type '$type'."
            return 1
            ;;
    esac
}
export -f cache:image:produce

## Will modify the current $_CURRENT_DOCKER_COMPOSE file
service_ensure_image_ready() {
    if [ -z "$COMBINED_HASH" ]; then
        err "Expected \$COMBINED_HASH to be set."
        return 1
    fi
    local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$COMBINED_HASH" \
          master_service service_def service_image service_build service_dockerfile image \
          specified_image specified_image_id charm_image_name hash \
          service_quoted

    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: cache hit ($*)"
        touch "$cache_file" || return 1
        cat "$cache_file"
        return 0
    fi
    if [ -z "$_CURRENT_DOCKER_COMPOSE" ]; then
        err "${FUNCNAME[0]} is meant to be called after"\
            "\$_CURRENT_DOCKER_COMPOSE has been calculated."
        echo " Called by:" >&2
        printf " - %s\n" "${FUNCNAME[@]:1}" >&2
        return 1
    fi

    master_service="$(get_top_master_service_for_service "$service")" || {
        err "Could not compute master service for service $DARKYELLOW$service$NORMAL."
        return 1
    }
    if [ "$master_service" != "$service" ]; then
        image=$(service_ensure_image_ready "$master_service") || return 1
        printf "%s" "$image" | tee "$cache_file"
        return $?
    fi

    ## check if \$_CURRENT_DOCKER_COMPOSE's service def is already correctly set up
    local charm="$(get_service_charm "$service")" || return 1
    local charm_image_name="cache/charm/$charm" || return 1
    local service_def="$(get_service_def "$service")" || {
        err "Could not get docker-compose service definition for $DARKYELLOW$service$NORMAL."
        return 1
    }
    local service_quoted=${service//./\\.}

    if specified_image=$(echo "$service_def" | shyaml get-value image 2>/dev/null); then
        if [[ "$specified_image" == "$charm_image_name"* ]]; then
            ## Assume we already did the change
            printf "%s" "$specified_image" | tee "$cache_file"
            return 0
        fi
        if [[ "$specified_image" == "${COMPOSE_DOCKER_REGISTRY}/"* ]]; then
            if ! docker_has_image "${specified_image}"; then
                Wrap "${wrap_opts[@]}" \
                     -v -d "pulling ${DARKPINK}$charm${NORMAL}'s specified image from $COMPOSE_DOCKER_REGISTRY" -- \
                     docker pull "${specified_image}" >&2 || return 1
            else
                if [ -n "$DEBUG" ]; then
                    Elt "using local ${DARKPINK}$charm${NORMAL}'s specified image from $COMPOSE_DOCKER_REGISTRY" >&2
                    print_status noop >&2
                    Feed >&2
                fi
            fi
            ## Already on the cache server
            printf "%s" "$specified_image" | tee "$cache_file"
            return 0
        fi
        src="$specified_image"
        hash=$(echo "$specified_image" | md5sum | cut -f 1 -d " ") || return 1
        type=fetch

        ## replace image by charm image
        yq -i ".services.[\"${service_quoted}\"].image = \"${charm_image_name}:${hash}\"" \
           "$_CURRENT_DOCKER_COMPOSE" || return 1
    else

        if ! src=$(echo "$service_def" | shyaml get-value build 2>/dev/null); then
            err "Service $DARKYELLOW$service$NORMAL has no ${WHITE}image${NORMAL} nor ${WHITE}build${NORMAL} parameter."
            echo "$service_def" >&2
            return 1
        fi

        ## According to https://stackoverflow.com/questions/32230577 , if there's a build,
        ## then the built image will get the name ${project}_${service}
        hash=$(get_build_hash "$src") || return 1
        type=build
        ## delete the build key from service_def and add image to charm_image_name
        yq -i "del(.services.[\"${service_quoted}\"].build) |
               .services.[\"${service_quoted}\"].image = \"${charm_image_name}:${hash}\"" \
           "$_CURRENT_DOCKER_COMPOSE" || return 1

    fi
    if [ "$COMPOSE_ACTION" != "build" ] && docker_has_image "${charm_image_name}:${hash}"; then
        if [ -n "$DEBUG" ]; then
            Elt "using ${DARKPINK}$charm${NORMAL}'s image from local cache" >&2
            print_status noop >&2
            Feed >&2
        fi
        cache:image:registry:put "$charm" "$hash" "$service" || return 1
        printf "%s" "${charm_image_name}:${hash}" | tee "$cache_file"
        return $?
    fi

    ## Can we pull it ? Let's check on $COMPOSE_DOCKER_REGISTRY
    if [ "$COMPOSE_ACTION" != "build" ] && [ -n "$COMPOSE_DOCKER_REGISTRY" ]; then
        img=$(cache:image:registry:get "$charm" "$hash" "$service") || {
            err "Failed to get image '$charm_image_name:$hash' from registry for ${DARKYELLOW}$service${NORMAL}."
            return 1
        }
        [ -n "$img" ] && {
            printf "%s" "$img" | tee "$cache_file"
            return $?
        }
    fi
    cache:image:produce "$type" "$src" "$charm" "$hash" "$service" || return 1
    cache:image:registry:put "$charm" "$hash" "$service" || return 1
    printf "%s" "${charm_image_name}:$hash" | tee "$cache_file"
    return $?
}
export -f service_ensure_image_ready


get_charm_relation_def () {
    local charm="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          relation_def metadata
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    metadata="$(charm.metadata "$charm")" || return 1
    relation_def="$(echo "$metadata" | shyaml get-value "provides.${relation_name}" 2>/dev/null)"
    echo "$relation_def" | tee "$cache_file"
}
export -f get_charm_relation_def
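
## Illustrative example: with a hypothetical charm 'myapp' whose metadata.yml
## contains
##
##   provides:
##     web-proxy:
##       tech-dep: reversed
##
## 'get_charm_relation_def myapp web-proxy' would print the
## 'tech-dep: reversed' mapping.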


get_charm_tech_dep_orientation_for_relation() {
    local charm="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          relation_def value
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    relation_def=$(get_charm_relation_def "$charm" "$relation_name" 2>/dev/null)
    value=$(echo "$relation_def" | shyaml get-value 'tech-dep' 2>/dev/null)
    value=${value:-True}
    printf "%s" "$value" | tee "$cache_file"
}
export -f get_charm_tech_dep_orientation_for_relation


get_service_relation_tech_dep() {
    local service="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          charm tech_dep
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    charm=$(get_service_charm "$service") || return 1
    tech_dep="$(get_charm_tech_dep_orientation_for_relation "$charm" "$relation_name")" || return 1
    printf "%s" "$tech_dep" | tee "$cache_file"
}
export -f get_service_relation_tech_dep


##
## Use compose file to get deps, and relation definition in metadata.yml
## for tech-dep attribute.
get_service_deps() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$GLOBAL_ALL_RELATIONS_HASH")"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    (
        set -o pipefail
        get_service_relations "$service" | \
            while read-0 relation_name target_service _relation_config tech_dep; do
                echo "$target_service"
            done | tee "$cache_file"
    ) || return 1
}
export -f get_service_deps
|
|
|
|
## XXXvlab: cache was disabled because improper. Indeed, this needs to cache
|
|
## 'depths' full state. Second, it should be
|
|
_rec_get_depth() {
|
|
local elt=$1 dep deps max
|
|
[ "${depths[$elt]}" ] && return 0
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$(declare -pA depths)" "$GLOBAL_ALL_RELATIONS_HASH")"
|
|
if [ -e "$cache_file.depths" ]; then
|
|
#debug "$FUNCNAME: cache hit ($*) - $cache_file.depths"
|
|
while read-0 k v; do
|
|
depths["$k"]="$v"
|
|
done < "$cache_file.depths"
|
|
while read-0 k v; do
|
|
visited["$k"]="$v"
|
|
done < "$cache_file.visited"
|
|
return 0
|
|
fi
|
|
|
|
visited[$elt]=1
|
|
#debug "Setting visited[$elt]"
|
|
#debug "Asking for $DARKYELLOW$elt$NORMAL dependencies"
|
|
deps=$(get_service_deps "$elt") || {
|
|
debug "Failed get_service_deps $elt"
|
|
return 1
|
|
}
|
|
# debug "$elt deps are:" $deps
|
|
max=0
|
|
for dep in $deps; do
|
|
[ "${visited[$dep]}" ] && {
|
|
#debug "Already computing $dep"
|
|
continue
|
|
}
|
|
_rec_get_depth "$dep" || return 1
|
|
#debug "Requesting depth[$dep]"
|
|
if (( ${depths[$dep]} > max )); then
|
|
max="${depths[$dep]}"
|
|
fi
|
|
done
|
|
# debug "Setting depth[$elt] to $((max + 1))"
|
|
depths[$elt]=$((max + 1))
|
|
array_kv_to_stdin depths > "$cache_file.depths"
|
|
array_kv_to_stdin visited > "$cache_file.visited"
|
|
# debug "DEPTHS: $(declare -pA depths)"
|
|
# debug "$FUNCNAME: caching hit ($*) - $cache_file"
|
|
}
|
|
export -f _rec_get_depth
|
|
|
|
|
|
get_ordered_service_dependencies() {
|
|
local services=("$@") cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$GLOBAL_ALL_RELATIONS_HASH")" \
|
|
i value key heads depths visited
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
#debug "Figuring ordered deps of $DARKYELLOW${services[@]}$NORMAL"
|
|
if [ -z "${services[*]}" ]; then
|
|
return 0
|
|
# print_syntax_error "$FUNCNAME: no arguments"
|
|
# return 1
|
|
fi
|
|
|
|
declare -A depths
|
|
declare -A visited
|
|
heads=("${services[@]}")
|
|
while [ "${#heads[@]}" != 0 ]; do
|
|
array_pop heads head
|
|
_rec_get_depth "$head" || return 1
|
|
done
|
|
|
|
i=0
|
|
while [ "${#depths[@]}" != 0 ]; do
|
|
for key in "${!depths[@]}"; do
|
|
value="${depths[$key]}"
|
|
if [ "$value" == "$i" ]; then
|
|
echo "$key"
|
|
unset depths[$key]
|
|
fi
|
|
done
|
|
((i++))
|
|
done | tee "$cache_file"
|
|
}
|
|
export -f get_ordered_service_dependencies
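## Illustrative ordering (service names hypothetical): if 'web' relates to
## 'db' and 'db' has no further deps, services are printed by increasing
## depth, dependencies first:
##
##   $ get_ordered_service_dependencies web
##   db
##   web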
|
|
|
|
## Modify $_CURRENT_DOCKER_COMPOSE file, and fills cache
|
|
run_service_acquire_images () {
|
|
local service subservice subservices loaded
|
|
_CURRENT_DOCKER_COMPOSE_HASH=$(hash_get < "$_CURRENT_DOCKER_COMPOSE")
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@" "$_CURRENT_DOCKER_COMPOSE_HASH" "$COMBINED_HASH")"
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
touch "$cache_file" || return 1
|
|
cp "$cache_file" "$_CURRENT_DOCKER_COMPOSE" || return 1
|
|
return 0
|
|
fi
|
|
declare -A loaded
|
|
for service in "$@"; do
|
|
subservices=$(get_ordered_service_dependencies "$service") || return 1
|
|
for subservice in $subservices; do
|
|
if [ "${loaded[$subservice]}" ]; then
|
|
## Prevent double inclusion of the same service if it
## is a dependency of two or more of the requested
## services.
|
|
continue
|
|
fi
|
|
|
|
type=$(get_service_type "$subservice") || return 1
|
|
MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$subservice") || return 1
|
|
if [ "$type" != "stub" ]; then
|
|
DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$MASTER_BASE_SERVICE_NAME") || return 1
|
|
fi
|
|
|
|
loaded[$subservice]=1
|
|
done
|
|
done
|
|
cp "$_CURRENT_DOCKER_COMPOSE" "$cache_file" || return 1
|
|
return 0
|
|
}
|
|
|
|
run_service_hook () {
|
|
local action="$1" service subservice subservices loaded
|
|
shift
|
|
declare -A loaded
|
|
for service in "$@"; do
|
|
subservices=$(get_ordered_service_dependencies "$service") || return 1
|
|
for subservice in $subservices; do
|
|
if [ "${loaded[$subservice]}" ]; then
|
|
## Prevent double inclusion of the same service if it
## is a dependency of two or more of the requested
## services.
|
|
continue
|
|
fi
|
|
|
|
charm=$(get_service_charm "$subservice") || return 1
|
|
charm.has_hook "$charm" "$action" >/dev/null || continue
|
|
type=$(get_service_type "$subservice") || return 1
|
|
|
|
PROJECT_NAME=$(get_default_project_name) || return 1
|
|
MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$subservice") || return 1
|
|
MASTER_BASE_CHARM_NAME=$(get_service_charm "$MASTER_BASE_SERVICE_NAME") || return 1
|
|
if [ "$type" != "stub" ]; then
|
|
DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$MASTER_BASE_SERVICE_NAME") || return 1
|
|
fi
|
|
|
|
Wrap "${wrap_opts[@]}" -d "running $YELLOW$action$NORMAL hook of $DARKYELLOW$subservice$NORMAL in charm $DARKPINK$charm$NORMAL" <<EOF || return 1
|
|
|
|
export DOCKER_BASE_IMAGE="$DOCKER_BASE_IMAGE"
|
|
export SERVICE_NAME=$subservice
|
|
export IMAGE_NAME=$(echo "${PROJECT_NAME}" | tr -d "_-")_\${SERVICE_NAME}
|
|
export CONTAINER_NAME=\${IMAGE_NAME}_1
|
|
export CHARM_NAME="$charm"
|
|
export PROJECT_NAME="$PROJECT_NAME"
|
|
export SERVICE_DATASTORE="$DATASTORE/$subservice"
|
|
export SERVICE_CONFIGSTORE="$CONFIGSTORE/$subservice"
|
|
export MASTER_BASE_SERVICE_NAME="$MASTER_BASE_SERVICE_NAME"
|
|
export MASTER_BASE_CHARM_NAME="$MASTER_BASE_CHARM_NAME"
|
|
|
|
charm.run_hook "local" "$charm" "$action"
|
|
|
|
EOF
|
|
loaded[$subservice]=1
|
|
done
|
|
done
|
|
return 0
|
|
}
|
|
|
|
|
|
host_resource_get() {
|
|
local location="$1" cfg="$2"
|
|
type=$(echo "$cfg" | shyaml get-value type 2>/dev/null) || {
|
|
err "Missing ${WHITE}type$NORMAL option in ${WHITE}get$NORMAL config for location '$location'"
|
|
return 1
|
|
}
|
|
if fn.exists host_resource_get_$type; then
|
|
host_resource_get_$type "$location" "$cfg"
|
|
else
|
|
err "Source ${WHITE}source$NORMAL type '$type' unknown for" \
|
|
"${WHITE}host-resource$NORMAL '$location' defined in" \
|
|
"$DARKYELLOW$subservice$NORMAL config."
|
|
return 1
|
|
fi
|
|
}
|
|
export -f host_resource_get
|
|
|
|
|
|
host_resource_get_git() {
|
|
local location="$1" cfg="$2" branch parent url
|
|
branch=$(echo "$cfg" | shyaml get-value branch 2>/dev/null)
|
|
branch=${branch:-master}
|
|
|
|
url=$(echo "$cfg" | shyaml get-value url 2>/dev/null)
|
|
parent="$(dirname "$location")"
|
|
(
|
|
mkdir -p "$parent" && cd "$parent" &&
|
|
git clone -b "$branch" "$url" "$(basename "$location")"
|
|
) || return 1
|
|
}
|
|
export -f host_resource_get_git
|
|
|
|
|
|
host_resource_get_git-sub() {
|
|
local location="$1" cfg="$2" branch parent url
|
|
branch=$(echo "$cfg" | shyaml get-value branch 2>/dev/null)
|
|
branch=${branch:-master}
|
|
|
|
url=$(echo "$cfg" | shyaml get-value url 2>/dev/null)
|
|
parent="$(dirname "$location")"
|
|
(
|
|
mkdir -p "$parent" && cd "$parent" &&
|
|
git sub clone -b "$branch" "$url" "$(basename "$location")"
|
|
) || return 1
|
|
}
|
|
export -f host_resource_get_git-sub
|
|
|
|
|
|
setup_host_resource () {
|
|
local subservice="$1" service_def location get cfg
|
|
|
|
service_def=$(get_compose_service_def "$subservice") || return 1
|
|
while read-0 location cfg; do
|
|
## XXXvlab: will it always be a git resource?
|
|
if [ -d "$location" -a ! -d "$location/.git" ]; then
|
|
err "Hum, location '$location' does not seem to be a git directory."
|
|
return 1
|
|
fi
|
|
if [ -d "$location" ]; then
|
|
info "host resource '$location' already set up."
|
|
continue
|
|
fi
|
|
get=$(echo "$cfg" | shyaml get-value get 2>/dev/null)
|
|
|
|
if [ -z "$get" ]; then
|
|
err "No host directory '$location' found, and no ${WHITE}source$NORMAL" \
|
|
"specified for $DARKYELLOW$subservice$NORMAL."
|
|
return 1
|
|
fi
|
|
host_resource_get "$location" "$get" || return 1
|
|
done < <(echo "$service_def" | shyaml key-values-0 host-resources 2>/dev/null)
|
|
}
|
|
export -f setup_host_resource
|
|
|
|
|
|
setup_host_resources () {
|
|
local service subservices subservice loaded
|
|
|
|
declare -A loaded
|
|
for service in "$@"; do
|
|
subservices=$(get_ordered_service_dependencies "$service") || return 1
|
|
for subservice in $subservices; do
|
|
if [ "${loaded[$subservice]}" ]; then
|
|
## Prevent double inclusion of the same service if it
## is a dependency of two or more of the requested
## services.
|
|
continue
|
|
fi
|
|
setup_host_resource "$subservice" || return 1
|
|
loaded[$subservice]=1
|
|
done
|
|
done
|
|
return 0
|
|
}
|
|
export -f setup_host_resources
|
|
|
|
## Works on stdin
|
|
cfg-get-value () {
|
|
local key="$1" out
|
|
if [ -z "$key" ]; then
|
|
yaml_get_interpret || return 1
|
|
return 0
|
|
fi
|
|
if ! out=$(shyaml -y get-value "$key" 2>/dev/null); then
|
|
err "The key $WHITE$key$NORMAL was not found in relation's data."
|
|
return 1
|
|
fi
|
|
printf "%s\n" "$out" | yaml_get_interpret
|
|
}
|
|
export -f cfg-get-value
|
|
|
|
|
|
relation-get () {
|
|
if [ -z "$RELATION_DATA_FILE" ]; then
|
|
err-d "$FUNCNAME: var \$RELATION_DATA_FILE is not set."
|
|
return 1
|
|
fi
|
|
cfg-get-value "$1" < "$RELATION_DATA_FILE"
|
|
}
|
|
export -f relation-get
|
|
|
|
|
|
expand_vars() {
|
|
local unlikely_prefix="UNLIKELY_PREFIX"
|
|
content=$(cat -)
|
|
## find first identifier not in content
|
|
remaining_lines=$(echo "$content" | grep "^$unlikely_prefix")
|
|
while [ "$(echo "$remaining_lines" | grep "^$unlikely_prefix$")" ]; do
|
|
size_prefix="${#unlikely_prefix}"
|
|
first_matching=$(echo "$remaining_lines" |
|
|
grep -v "^$unlikely_prefix$" |
|
|
uniq -w "$((size_prefix + 1))" -c |
|
|
sort -rn |
|
|
head -n 1)
|
|
first_matching=${first_matching#"${x%%[![:space:]]*}"}
|
|
first_matching="${first_matching#* }"
|
|
next_char=${first_matching:$size_prefix:1}
|
|
if [ "$next_char" != "0" ]; then
|
|
unlikely_prefix+="0"
|
|
else
|
|
unlikely_prefix+="1"
|
|
fi
|
|
remaining_lines=$(echo "$remaining_lines" | grep "^$unlikely_prefix")
|
|
done
|
|
eval "cat <<$unlikely_prefix
|
|
$content
|
|
$unlikely_prefix"
|
|
}
|
|
export -f expand_vars
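## A small sketch of what expand_vars does (variable name hypothetical): it
## expands shell variables found on its stdin through a generated heredoc.
##
##   $ NAME=world
##   $ echo 'greeting: hello ${NAME}' | expand_vars
##   greeting: hello world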
|
|
|
|
|
|
yaml_get_interpret() {
|
|
local content tag
|
|
content=$(cat -)
|
|
tag=$(echo "$content" | shyaml get-type) || return 1
|
|
content=$(echo "$content" | shyaml get-value) || return 1
|
|
if ! [ "${tag:0:1}" == "!" ]; then
|
|
echo "$content" || return 1
|
|
return 0
|
|
fi
|
|
case "$tag" in
|
|
"!bash-stdout")
|
|
echo "$content" | bash || {
|
|
err "shell code didn't end with errorlevel 0"
|
|
return 1
|
|
}
|
|
;;
|
|
"!var-expand")
|
|
echo "$content" | expand_vars || {
|
|
err "shell expansion failed"
|
|
return 1
|
|
}
|
|
;;
|
|
"!file-content")
|
|
source=$(echo "$content" | expand_vars) || {
|
|
err "shell expansion failed"
|
|
return 1
|
|
}
|
|
cat "$source" || return 1
|
|
;;
|
|
*)
|
|
err "Invalid object tag ${WHITE}$tag${NORMAL}"
|
|
return 1
|
|
;;
|
|
esac
|
|
}
|
|
export -f yaml_get_interpret
|
|
|
|
|
|
options-get () {
|
|
local key="$1" out
|
|
service_def=$(get_compose_service_def "$SERVICE_NAME") || return 1
|
|
if ! out=$(echo "$service_def" | shyaml -y get-value "options.$key" 2>/dev/null); then
|
|
err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
|
|
return 1
|
|
fi
|
|
echo "$out" | yaml_get_interpret
|
|
}
|
|
export -f options-get
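## Illustrative compose.yml snippet (service and option names hypothetical)
## whose value 'options-get admin-password' would resolve at hook time via
## yaml_get_interpret and its '!bash-stdout' tag:
##
##   myservice:
##     options:
##       admin-password: !bash-stdout pwgen -s 16 1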
|
|
|
|
|
|
relation-base-compose-get () {
|
|
local key="$1" out
|
|
if ! out=$(echo "$RELATION_BASE_COMPOSE_DEF" | shyaml -y get-value "options.$key" 2>/dev/null); then
|
|
err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
|
|
return 1
|
|
fi
|
|
echo "$out" | yaml_get_interpret
|
|
}
|
|
export -f relation-base-compose-get
|
|
|
|
|
|
relation-target-compose-get () {
|
|
local key="$1" out
|
|
if ! out=$(echo "$RELATION_TARGET_COMPOSE_DEF" | shyaml -y get-value "options.$key" 2>/dev/null); then
|
|
err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
|
|
return 1
|
|
fi
|
|
echo "$out" | yaml_get_interpret
|
|
}
|
|
export -f relation-target-compose-get
|
|
|
|
|
|
relation-set () {
|
|
local key="$1" value="$2"
|
|
if [ -z "$RELATION_DATA_FILE" ]; then
|
|
err "$FUNCNAME: relation does not seems to be correctly setup."
|
|
return 1
|
|
fi
|
|
|
|
if ! [ -r "$RELATION_DATA_FILE" ]; then
|
|
err "$FUNCNAME: can't read relation's data." >&2
|
|
return 1
|
|
fi
|
|
|
|
_config_merge "$RELATION_DATA_FILE" <(yaml_key_val_str "$key" "$value")
|
|
}
|
|
export -f relation-set
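## Typical (illustrative) usage from a relation-joined hook; the keys are
## hypothetical and end up merged into $RELATION_DATA_FILE:
##
##   relation-set dbname "$SERVICE_NAME"
##   relation-set password "$(head -c 16 /dev/urandom | base64)"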
|
|
|
|
|
|
_config_merge() {
|
|
local config_filename="$1" mixin="$2"
|
|
touch "$config_filename" &&
|
|
merge_yaml "$config_filename" "$mixin" > "$config_filename.tmp" || return 1
|
|
mv "$config_filename.tmp" "$config_filename"
|
|
}
|
|
export -f _config_merge
|
|
|
|
|
|
## XXXvlab: this can only be used in relations; I'd like to use it in init.
|
|
config-add() {
|
|
local metadata="$1"
|
|
_config_merge "$RELATION_CONFIG" <(echo "$metadata")
|
|
}
|
|
export -f config-add
|
|
|
|
|
|
## XXXvlab: this can only be used in relations; I'd like to use it in init.
|
|
init-config-add() {
|
|
local metadata="$1"
|
|
_config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" \
|
|
<(yaml_key_val_str "services" "$metadata")
|
|
}
|
|
export -f init-config-add
|
|
|
|
|
|
docker_get_uid() {
|
|
local service="$1" user="$2" uid
|
|
uid=$(cached_cmd_on_base_image "$service" "id -u \"$user\"") || {
|
|
debug "Failed to query for '$user' uid in ${DARKYELLOW}$service${NORMAL} base image."
|
|
return 1
|
|
}
|
|
info "uid from ${DARKYELLOW}$service${NORMAL} for user '$user' is '$uid'"
|
|
echo "$uid"
|
|
}
|
|
export -f docker_get_uid
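## Illustrative call (service and user names hypothetical): fetch the uid of
## 'www-data' inside the base image of service 'apache':
##
##   uid=$(docker_get_uid "apache" "www-data") || exit 1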
|
|
|
|
docker_get_uid_gid() {
|
|
local service="$1" user="$2" group="$3" uid
|
|
uid_gid=$(cached_cmd_on_base_image "$service" "id -u \"$user\"; id -g \"$group\"") || {
|
|
debug "Failed to query for '$user' uid in ${DARKYELLOW}$service${NORMAL} base image."
|
|
return 1
|
|
}
|
|
info "uid from ${DARKYELLOW}$service${NORMAL} for user '$user' is '$uid_gid'"
|
|
echo "$uid_gid"
|
|
}
|
|
export -f docker_get_uid_gid
|
|
|
|
|
|
logstdout() {
|
|
local name="$1"
|
|
sed -r 's%^%'"${name}"'> %g'
|
|
}
|
|
export -f logstdout
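## Illustrative (label hypothetical): prefix every line of a stream with a
## service label:
##
##   echo "starting" | logstdout "myservice"   ## -> "myservice> starting"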
|
|
|
|
|
|
logstderr() {
|
|
local name="$1"
|
|
sed -r 's%^(.*)$%'"${RED}${name}>${NORMAL} \1"'%g'
|
|
}
|
|
export -f logstderr
|
|
|
|
|
|
_run_service_relation () {
|
|
local relation_name="$1" service="$2" target_service="$3" relation_config="$4" relation_dir services
|
|
local errlvl
|
|
|
|
charm=$(get_service_charm "$service") || return 1
|
|
target_charm=$(get_service_charm "$target_service") || return 1
|
|
|
|
base_script_name=$(charm.has_relation_hook "$charm" "$relation_name" relation-joined) || true
|
|
target_script_name=$(charm.has_relation_hook "$target_charm" "$relation_name" relation-joined) || true
|
|
[ -n "$base_script_name" ] || [ -n "$target_script_name" ] || return 0
|
|
|
|
relation_dir=$(get_relation_data_dir "$service" "$target_service" "$relation_name") || return 1
|
|
RELATION_DATA_FILE=$(get_relation_data_file "$service" "$target_service" "$relation_name" "$relation_config") || return 1
|
|
export BASE_SERVICE_NAME=$service
|
|
export BASE_CHARM_NAME=$charm
|
|
export BASE_CHARM_PATH=$(charm.get_dir "$charm")
|
|
export TARGET_SERVICE_NAME=$target_service
|
|
export TARGET_CHARM_NAME=$target_charm
|
|
export TARGET_CHARM_PATH=$(charm.get_dir "$target_charm")
|
|
export RELATION_DATA_FILE
|
|
target_errlvl=0
|
|
|
|
if [ -z "$target_script_name" ]; then
|
|
verb "No relation script $DARKBLUE$relation_name$NORMAL in target $DARKPINK$target_charm$NORMAL."
|
|
else
|
|
verb "Running ${DARKBLUE}$relation_name${NORMAL} relation-joined script" \
|
|
"for target $DARKYELLOW$target_service$NORMAL (charm $DARKPINK$target_charm$NORMAL)"
|
|
RELATION_CONFIG="$relation_dir/config_provider"
|
|
type=$(get_service_type "$target_service") || return 1
|
|
if [ "$type" != "stub" ]; then
|
|
DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$target_service") || return 1
|
|
fi
|
|
export DOCKER_BASE_IMAGE RELATION_CONFIG RELATION_DATA
|
|
{
|
|
(
|
|
SERVICE_NAME=$target_service
|
|
SERVICE_DATASTORE="$DATASTORE/$target_service"
|
|
SERVICE_CONFIGSTORE="$CONFIGSTORE/$target_service"
|
|
export SERVICE_NAME DOCKER_BASE_IMAGE SERVICE_DATASTORE SERVICE_CONFIGSTORE
|
|
charm.run_relation_hook local "$target_charm" "$relation_name" relation-joined
|
|
echo "$?" > "$relation_dir/target_errlvl"
|
|
) | logstdout "$DARKYELLOW$target_service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${GREEN}@${NORMAL}"
|
|
} 3>&1 1>&2 2>&3 | logstderr "$DARKYELLOW$target_service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${RED}@${NORMAL}" 3>&1 1>&2 2>&3
|
|
target_errlvl="$(cat "$relation_dir/target_errlvl")" || {
|
|
err "Relation script '$script_name' in $DARKPINK$target_charm$NORMAL" \
|
|
"failed before outputing an errorlevel."
|
|
((target_errlvl |= "1" ))
|
|
}
|
|
if [ -e "$RELATION_CONFIG" ]; then
|
|
debug "Merging some new config info in $DARKYELLOW$target_service$NORMAL"
|
|
_config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" "$RELATION_CONFIG" &&
|
|
rm "$RELATION_CONFIG"
|
|
((target_errlvl |= "$?"))
|
|
fi
|
|
fi
|
|
|
|
if [ "$target_errlvl" == 0 ]; then
|
|
errlvl=0
|
|
if [ "$base_script_name" ]; then
|
|
verb "Running ${DARKBLUE}$relation_name${NORMAL} relation-joined script" \
|
|
"for $DARKYELLOW$service$NORMAL (charm $DARKPINK$charm$NORMAL)"
|
|
RELATION_CONFIG="$relation_dir/config_providee"
|
|
RELATION_DATA="$(cat "$RELATION_DATA_FILE")"
|
|
DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$service") || return 1
|
|
export DOCKER_BASE_IMAGE RELATION_CONFIG RELATION_DATA
|
|
{
|
|
(
|
|
SERVICE_NAME=$service
|
|
SERVICE_DATASTORE="$DATASTORE/$service"
|
|
SERVICE_CONFIGSTORE="$CONFIGSTORE/$service"
|
|
export SERVICE_NAME DOCKER_BASE_IMAGE SERVICE_DATASTORE SERVICE_CONFIGSTORE
|
|
charm.run_relation_hook local "$charm" "$relation_name" relation-joined
|
|
echo "$?" > "$relation_dir/errlvl"
|
|
) | logstdout "$DARKYELLOW$service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${GREEN}@${NORMAL}"
|
|
} 3>&1 1>&2 2>&3 | logstderr "$DARKYELLOW$service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${RED}@$NORMAL" 3>&1 1>&2 2>&3
|
|
errlvl="$(cat "$relation_dir/errlvl")" || {
|
|
err "Relation script '$script_name' in $DARKPINK$charm$NORMAL" \
|
|
"failed before outputing an errorlevel."
|
|
((errlvl |= "1" ))
|
|
}
|
|
if [ -e "$RELATION_CONFIG" ]; then
|
|
_config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" "$RELATION_CONFIG" &&
|
|
rm "$RELATION_CONFIG"
|
|
((errlvl |= "$?" ))
|
|
fi
|
|
if [ "$errlvl" != 0 ]; then
|
|
err "Relation $DARKBLUE$relation_name$NORMAL on $DARKYELLOW$service$NORMAL failed to run properly."
|
|
fi
|
|
else
|
|
verb "No relation script '$script_name' in charm $DARKPINK$charm$NORMAL. Ignoring."
|
|
fi
|
|
else
|
|
err "Relation $DARKBLUE$relation_name$NORMAL on $DARKYELLOW$target_service$NORMAL failed to run properly."
|
|
fi
|
|
|
|
if [ "$target_errlvl" == 0 -a "$errlvl" == 0 ]; then
|
|
debug "Relation $DARKBLUE$relation_name$NORMAL is established" \
|
|
"between $DARKYELLOW$service$NORMAL and $DARKYELLOW$target_service$NORMAL."
|
|
return 0
|
|
else
|
|
return 1
|
|
fi
|
|
}
|
|
export -f _run_service_relation
|
|
|
|
|
|
_get_compose_relations_cached () {
|
|
local compose_service_def="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)" \
|
|
relation_name relation_def target_service
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: STATIC cache hit $1"
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
(
|
|
set -o pipefail
|
|
if [ "$compose_service_def" ]; then
|
|
while read-0 relation_name relation_def; do
|
|
## XXXvlab: could we use braces here instead of parentheses?
|
|
(
|
|
case "$(echo "$relation_def" | shyaml get-type 2>/dev/null)" in
|
|
"str")
|
|
target_service="$(echo "$relation_def" | shyaml get-value 2>/dev/null)" || return 1
|
|
tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
|
|
printf "%s\0" "$relation_name" "$target_service" "" "$tech_dep"
|
|
;;
|
|
"sequence")
|
|
while read-0 target_service; do
|
|
tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
|
|
printf "%s\0" "$relation_name" "$target_service" "" "$tech_dep"
|
|
done < <(echo "$relation_def" | shyaml get-values-0 2>/dev/null)
|
|
;;
|
|
"struct")
|
|
while read-0 target_service relation_config; do
|
|
tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
|
|
printf "%s\0" "$relation_name" "$target_service" "$relation_config" "$tech_dep"
|
|
done < <(echo "$relation_def" | shyaml key-values-0 2>/dev/null)
|
|
;;
|
|
esac
|
|
) </dev/null >> "$cache_file" || return 1
|
|
done < <(echo "$compose_service_def" | shyaml key-values-0 relations 2>/dev/null)
|
|
fi
|
|
)
|
|
if [ "$?" != 0 ]; then
|
|
err "Error while looking for compose relations."
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
[ -e "$cache_file" ] && cat "$cache_file"
|
|
return 0
|
|
}
|
|
export -f _get_compose_relations_cached
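## Illustrative compose.yml 'relations' forms (service and relation names
## hypothetical) matching the three shyaml types handled above:
##
##   relations:
##     web-proxy: frontend            ## str
##     log-rotate:                    ## sequence
##       - logrotate
##     postgres-database:             ## struct
##       db:
##         dbname: myapp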
|
|
|
|
|
|
get_compose_relations () {
|
|
if [ -z "$COMBINED_HASH" ]; then
|
|
err-d "Expected \$COMBINED_HASH to be set."
|
|
return 1
|
|
fi
|
|
local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$COMBINED_HASH" \
|
|
compose_def
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
compose_def="$(get_compose_service_def "$service")" || return 1
|
|
_get_compose_relations_cached "$compose_def" > "$cache_file"
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_compose_relations
|
|
|
|
|
|
get_all_services() {
|
|
local services compose_yml_services service
|
|
if [ -z "$GLOBAL_ALL_RELATIONS_HASH" ]; then
|
|
err-d "Can't access global \$GLOBAL_ALL_RELATIONS_HASH"
|
|
return 1
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$GLOBAL_ALL_RELATIONS_HASH" "$(declare -f "$FUNCNAME")")" \
|
|
s rn ts rc td services service
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
if [ -z "$GLOBAL_ALL_RELATIONS" ]; then
|
|
err-d "Can't access global \$GLOBAL_ALL_RELATIONS"
|
|
return 1
|
|
fi
|
|
|
|
declare -A services
|
|
while read-0 s _ ts _ _; do
|
|
for service in "$s" "$ts"; do
|
|
[ "${services[$service]}" ] && continue
|
|
services["$service"]=1
|
|
echo "$service"
|
|
done
|
|
done < "$GLOBAL_ALL_RELATIONS" > "$cache_file.wip"
|
|
|
|
compose_yml_services=($(compose:yml:root:services)) || return 1
|
|
|
|
for service in "${compose_yml_services[@]}"; do
|
|
[ "${services[$service]}" ] && continue
|
|
services["$service"]=1
|
|
echo "$service"
|
|
done >> "$cache_file.wip"
|
|
|
|
mv "$cache_file"{.wip,} || return 1
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_all_services
|
|
|
|
|
|
get_service_relations () {
|
|
if [ -z "$GLOBAL_ALL_RELATIONS" ]; then
|
|
err-d "Can't access global \$GLOBAL_ALL_RELATIONS"
|
|
return 1
|
|
fi
|
|
local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$GLOBAL_ALL_RELATIONS_HASH" \
|
|
s rn ts rc td
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
while read-0 s rn ts rc td; do
|
|
[[ "$s" == "$service" ]] || continue
|
|
printf "%s\0" "$rn" "$ts" "$rc" "$td"
|
|
done < <(cat "$GLOBAL_ALL_RELATIONS") > "$cache_file"
|
|
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_service_relations
|
|
|
|
|
|
get_service_relation() {
|
|
local service="$1" relation="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
|
|
rn ts rc td
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
while read-0-err E rn ts rc td; do
|
|
[ "$relation" == "$rn" ] && {
|
|
printf "%s\0" "$ts" "$rc" "$td"
|
|
break
|
|
}
|
|
done < <(p-err get_service_relations "$service") > "${cache_file}.wip"
|
|
if [ "$?" != 0 ]; then
|
|
return 1
|
|
fi
|
|
if [ "$E" != 0 ]; then
|
|
return 1
|
|
fi
|
|
mv "${cache_file}"{.wip,} || return 1
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_service_relation
|
|
|
|
|
|
## From a service and a relation name, get all relations targeting the given
## service with the given relation.
##
## Returns a NUL-separated list of couples:
##   (base_service, relation_config)
##
|
|
get_service_incoming_relations() {
|
|
if [ -z "$SUBSET_ALL_RELATIONS_HASH" ]; then
|
|
err-d "Expected \$SUBSET_ALL_RELATIONS_HASH to be set."
|
|
return 1
|
|
fi
|
|
local service="$1" relation="$2" \
|
|
cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$SUBSET_ALL_RELATIONS_HASH")" \
|
|
s rn ts rc td
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
while read-0 s rn ts rc _td; do
|
|
[[ "$ts" == "$service" ]] || continue
|
|
[[ "$rn" == "$relation" ]] || continue
|
|
relation_data_file=$(get_relation_data_file "$s" "$ts" "$rn" "$rc") || return 1
|
|
printf "%s\0" "$s" "$(cat "$relation_data_file")" || return 1
|
|
debug "Found relation $rn from $s to $ts" >&2
|
|
done < "$SUBSET_ALL_RELATIONS" > "$cache_file.wip"
|
|
mv "$cache_file"{.wip,} || return 1
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_service_incoming_relations
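## Illustrative consumption of the NUL-separated output (names hypothetical):
##
##   while read-0 base_service relation_data; do
##       echo "provider: $base_service"
##   done < <(get_service_incoming_relations "db" "postgres-database")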
|
|
|
|
|
|
export TRAVERSE_SEPARATOR=:
|
|
## Traverse a service path, following at each step the first service satisfying the relation
|
|
service:traverse() {
|
|
local service_path="$1"
|
|
{
|
|
SEPARATOR=:
|
|
read -d "$TRAVERSE_SEPARATOR" service
|
|
while read -d "$TRAVERSE_SEPARATOR" relation; do
|
|
## XXXvlab: Take only first service
|
|
if ! read-0 ts _ _ < <(get_service_relation "${service}" "${relation}"); then
|
|
err "Couldn't find relation ${DARKCYAN}${relation}${NORMAL}" \
|
|
"from ${DARKYELLOW}$service${NORMAL}."
|
|
return 1
|
|
fi
|
|
service="$ts"
|
|
done
|
|
echo "$service"
|
|
} < <(e "${service_path}${TRAVERSE_SEPARATOR}")
|
|
}
|
|
export -f service:traverse
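## Illustrative traversal (service and relation names hypothetical): start at
## 'web', follow its 'web-proxy' relation, then the target's 'backup'
## relation, and print the final service reached:
##
##   service:traverse "web:web-proxy:backup"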
|
|
|
|
|
|
service:relation-file() {
|
|
local service_path="$1" relation service relation_file
|
|
if ! [[ "$service_path" == *"$TRAVERSE_SEPARATOR"* ]]; then
|
|
err "Invalid argument '$service_path'." \
|
|
"Must provide a service path (no '${TRAVERSE_SEPARATOR}' found)."
|
|
return 1
|
|
fi
|
|
relation="${service_path##*${TRAVERSE_SEPARATOR}}"
|
|
service=$(service:traverse "${service_path%${TRAVERSE_SEPARATOR}*}") || return 1
|
|
if ! read-0 ts rc _ < <(get_service_relation "${service}" "${relation}"); then
|
|
err "Couldn't find relation ${DARKCYAN}${relation}${NORMAL}" \
|
|
"from ${DARKYELLOW}$service${NORMAL}."
|
|
return 1
|
|
fi
|
|
relation_dir=$(get_relation_data_dir "$service" "$ts" "$relation") || {
|
|
err "Failed to find relation file"
|
|
return 1
|
|
}
|
|
relation_file="$relation_dir/data"
|
|
if ! [ -e "$relation_file" ]; then
|
|
e "$rc" > "$relation_file"
|
|
chmod go-rwx "$relation_file" ## protecting this file
|
|
fi
|
|
echo "$relation_file"
|
|
}
|
|
export -f service:relation-file
|
|
|
|
|
|
service:relation-options() {
|
|
local service_path="$1" relation_file
|
|
relation_file=$(service:relation-file "$service_path") || {
|
|
err "Failed to find relation file"
|
|
return 1
|
|
}
|
|
cat "$relation_file"
|
|
}
|
|
export -f service:relation-options
|
|
|
|
|
|
relation:get() {
|
|
local service_path="$1" query="$2" relation_file
|
|
relation_file=$(service:relation-file "$service_path") || {
|
|
err "Failed to find relation file"
|
|
return 1
|
|
}
|
|
cfg-get-value "$query" < "$relation_file"
|
|
}
|
|
export -f relation:get
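## Illustrative query (names hypothetical): read the 'dbname' key from the
## relation data of the 'postgres-database' relation of service 'web':
##
##   relation:get "web:postgres-database" "dbname"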
|
|
|
|
services:get:upable() {
|
|
if [ -z "$CHARM_STORE_HASH" ]; then
|
|
err-d "Expected \$CHARM_STORE_HASH to be set."
|
|
return 1
|
|
fi
|
|
local services_args=("$@") cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$CHARM_STORE_HASH" "$@")"
|
|
if [ -e "$cache_file" ]; then
|
|
touch "$cache_file" || return 1
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
declare -A seen
|
|
services=($(get_ordered_service_dependencies "${services_args[@]}")) || exit 1
|
|
for service in "${services[@]}"; do
|
|
mservice=$(get_master_service_for_service "$service") || exit 1
|
|
[ "${seen[$mservice]}" ] && continue
|
|
type="$(get_service_type "$mservice")" || exit 1
|
|
## skip run-once and stub services
|
|
[ "$type" == "run-once" ] && continue
|
|
[ "$type" == "stub" ] && continue
|
|
seen[$mservice]=1
|
|
echo "$mservice"
|
|
done > "$cache_file".wip
|
|
mv "$cache_file".wip "$cache_file"
|
|
cat "$cache_file"
|
|
}
|
|
export -f services:get:upable
|
|
|
|
service:state() {
|
|
local service="$1" states state
|
|
project_name=$(get_default_project_name) || return 1
|
|
states=()
|
|
for state in "$SERVICE_STATE_PATH"/"$project_name"/"$service"/*; do
|
|
[ -e "$state" ] || continue
|
|
state=${state##*/}
|
|
states+=("$state")
|
|
done
|
|
|
|
if [[ " ${states[*]} " == *" deploying "* ]]; then
|
|
echo "deploying"
|
|
elif [[ " ${states[*]} " == *" up "* ]]; then
|
|
echo "up"
|
|
else
|
|
echo "down"
|
|
fi
|
|
}
|
|
export -f service:state
|
|
|
|
charm:upstream-version() {
|
|
local charm="$1" version cache_file="$state_tmpdir/$FUNCNAME.cache.$1" path
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
if ! path=$(charm.has_direct_action "$charm" "upstream-versions"); then
|
|
return 0
|
|
fi
|
|
version=$("$path" -l 1) || {
|
|
err "Failed to get upstream version for ${DARKYELLOW}$charm${NORMAL}."
|
|
return 1
|
|
}
|
|
if path=$(charm.has_direct_action "$charm" "upstream-version-normalize"); then
|
|
version=$("$path" "$version") || {
|
|
err "Failed to normalize upstream version for ${DARKYELLOW}$charm${NORMAL}."
|
|
return 1
|
|
}
|
|
fi
|
|
echo "$version" > "$cache_file"
|
|
e "$version"
|
|
}
|
|
|
|
|
|
service:upstream-version() {
|
|
local service="$1" version
|
|
charm=$(get_service_charm "$service") || return 1
|
|
version=$(charm:upstream-version "$charm") || return 1
|
|
e "$version"
|
|
}
|
|
export -f service:upstream-version
|
|
|
|
|
|
_get_charm_metadata_uses() {
|
|
local metadata="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)"
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
printf "%s" "$metadata" | { shyaml key-values-0 uses 2>/dev/null || true; } | tee "$cache_file"
|
|
}
|
|
export -f _get_charm_metadata_uses
|
|
|
|
|
|
_get_service_metadata() {
|
|
local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
|
|
charm
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
charm="$(get_service_charm "$service")" || return 1
|
|
charm.metadata "$charm" > "$cache_file"
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
export -f _get_service_metadata
|
|
|
|
|
|
_get_service_uses() {
|
|
local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
|
|
metadata
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
metadata="$(_get_service_metadata "$service")" || return 1
|
|
_get_charm_metadata_uses "$metadata" > "$cache_file"
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
export -f _get_service_uses
|
|
|
|
|
|
_get_services_uses() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
service rn rd
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
for service in "$@"; do
|
|
_get_service_uses "$service" | while read-0 rn rd; do
|
|
printf "%s\0" "$service" "$rn" "$rd"
|
|
done
|
|
[ "${PIPESTATUS[0]}" == 0 ] || {
|
|
return 1
|
|
}
|
|
done > "${cache_file}.wip"
|
|
mv "${cache_file}"{.wip,} &&
|
|
cat "$cache_file" || return 1
|
|
}
|
|
export -f _get_services_uses
|
|
|
|
|
|
_get_provides_provides() {
|
|
local provides="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
service rn rd
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: CACHEDIR cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
type=$(printf "%s" "$provides" | shyaml get-type)
|
|
case "$type" in
|
|
sequence)
|
|
while read-0 prov; do
|
|
printf "%s\0" "$prov" ""
|
|
done < <(echo "$provides" | shyaml get-values-0)
|
|
;;
|
|
struct)
|
|
printf "%s" "$provides" | shyaml key-values-0
|
|
;;
|
|
str)
|
|
printf "%s\0" "$(echo "$provides" | shyaml get-value)" ""
|
|
;;
|
|
*)
|
|
err "Unexpected type '$type' for provider identifier in charm '$charm'."
|
|
return 1
|
|
esac | tee "$cache_file"
|
|
return "${PIPESTATUS[0]}"
|
|
}
|
|
|
|
|
|
_get_metadata_provides() {
|
|
local metadata="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
service rn rd
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: CACHEDIR cache hit"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
provides=$(printf "%s" "$metadata" | shyaml -q get-value -y provides "")
|
|
[ "$provides" -a "$provides" != "''" ] || { touch "$cache_file"; return 0; }
|
|
|
|
_get_provides_provides "$provides" | tee "$cache_file"
|
|
return "${PIPESTATUS[0]}"
|
|
}
|
|
|
|
_get_services_provides() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
service rn rd
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
## YYY: replace the inner loop by a cached function
|
|
for service in "$@"; do
|
|
metadata="$(_get_service_metadata "$service")" || return 1
|
|
|
|
while read-0 rn rd; do
|
|
printf "%s\0" "$service" "$rn" "$rd"
|
|
done < <(_get_metadata_provides "$metadata")
|
|
done > "$cache_file"
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
export -f _get_services_provides
|
|
|
|
|
|
_get_charm_provides() {
|
|
if [ -z "$CHARM_STORE_HASH" ]; then
|
|
err-d "Expected \$CHARM_STORE_HASH to be set."
|
|
return 1
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$CHARM_STORE_HASH" errlvl
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
start="$SECONDS"
|
|
debug "Getting charm provider list..."
|
|
while read-0 charm _ realpath metadata; do
|
|
metadata="$(charm.metadata "$charm")" || continue
|
|
# echo "reading $charm" >&2
|
|
while read-0 rn rd; do
|
|
printf "%s\0" "$charm" "$rn" "$rd"
|
|
done < <(_get_metadata_provides "$metadata")
|
|
done < <(charm.ls) | tee "$cache_file"
|
|
errlvl="${PIPESTATUS[0]}"
|
|
debug " ..charm provider list done $GRAY(in $((SECONDS - start))s)$NORMAL"
|
|
return "$errlvl"
|
|
}
|
|
|
|
|
|
_get_charm_providing() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
relation="$1"
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
while read-0 charm relation_name relation_def; do
|
|
[ "$relation_name" == "$relation" ] || continue
|
|
printf "%s\0" "$charm" "$relation_def"
|
|
done < <(_get_charm_provides) > "$cache_file"
|
|
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
|
|
|
|
_get_services_providing() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
relation="$1"
|
|
shift ## services is "$@"
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
while read-0 service relation_name relation_def; do
|
|
[ "$relation_name" == "$relation" ] || continue
|
|
printf "%s\0" "$service" "$relation_def"
|
|
done < <(_get_services_provides "$@") > "$cache_file"
|
|
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "$cache_file" ## no cache
|
|
return 1
|
|
fi
|
|
cat "$cache_file"
|
|
}
|
|
export -f _get_services_provides
|
|
|
|
|
|
_out_new_relation_from_defs() {
|
|
local service="$1" rn="$2" ts="$3" prov_def="$4" rel_def="$5" rc td rc_prov
|
|
|
|
rc_prov=$(printf "%s" "$prov_def" | shyaml -y get-value "default-options" 2>/dev/null)
|
|
## YYYvlab: should this be visible even when not in debug mode?
|
|
rc=$(printf "%s" "$rel_def" | shyaml -y get-value "default-options" 2>/dev/null)
|
|
|
|
td=$(echo "$prov_def" | shyaml get-value 'tech-dep' 2>/dev/null)
|
|
td=${td:-True}
|
|
rc=$(merge_yaml_str "$rc_prov" "$rc") || return 1
|
|
|
|
after=$(_out_after_value_from_def "$service" "$rn" "$rel_def") || return 1
|
|
printf "%s\0" "$after" "$service" "$relation_name" "$ts" "$rc" "$td"
|
|
}
|
|
|
|
|
|
_out_after_value_from_def() {
|
|
local service="$1" relation_name="$2" relation_def="$3" after_t after
|
|
if after_t=$(echo "$relation_def" | shyaml get-type after 2>/dev/null); then
|
|
case "$after_t" in
|
|
sequence)
|
|
after="$(echo "$relation_def" | shyaml get-values after 2>/dev/null)" || return 1
|
|
after=",$service:${after//$'\n'/,$service:},"
|
|
;;
|
|
struct)
|
|
err "Invalid type for ${WHITE}after${NORMAL}'s value in ${DARKBLUE}$relation_name${NORMAL}'s definition."
|
|
return 1
|
|
;;
|
|
str)
|
|
after=",$service:$(echo "$relation_def" | shyaml get-value after "" 2>/dev/null)," || return 1
|
|
;;
|
|
esac
|
|
else
|
|
after=""
|
|
fi
|
|
e "$after"
|
|
}
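## Illustrative metadata.yml 'uses' entry (names hypothetical) whose 'after'
## key would make _out_after_value_from_def emit ',myservice:backup,' so that
## the relation is ordered after the 'backup' relation of the same service:
##
##   uses:
##     web-proxy:
##       after:
##         - backup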
|
|
|
|
|
|
get_all_compose_yml_service() {
|
|
if [ -z "$COMPOSE_YML_CONTENT_HASH" ]; then
|
|
COMPOSE_YML_CONTENT_HASH=$(compose:yml:hash) || {
|
|
err "Failed to get compose yml hash"
|
|
return 1
|
|
}
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$COMPOSE_YML_CONTENT_HASH"
|
|
if [ -e "${cache_file}" ]; then
|
|
#debug "$FUNCNAME: cache hit: ${cache_file}"
|
|
cat "${cache_file}"
|
|
return 0
|
|
fi
|
|
compose_yml_content=$(get_compose_yml_content) || return 1
|
|
printf "%s" "${compose_yml_content}" | shyaml keys-0 2>/dev/null > "${cache_file}.wip" || {
|
|
err "Failed to get keys of compose content."
|
|
return 1
|
|
}
|
|
mv "${cache_file}"{.wip,} || return 1
|
|
cat "${cache_file}"
|
|
}
|
|
|
|
|
|
## Outputs all relations array.
|
|
_service:all:relations_cached() {
|
|
local services service E
|
|
services=($(compose:yml:root:services)) || return 1
|
|
get_all_relations "${services[@]}" || return 1
|
|
}
|
|
|
|
|
|
## Outputs all relations array.
|
|
service:all:relations() {
|
|
if [ -z "$COMBINED_HASH" ]; then
|
|
err-d "Expected \$COMBINED_HASH to be set."
|
|
return 1
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$COMBINED_HASH"
|
|
if [ -e "${cache_file}" ]; then
|
|
# debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "${cache_file}"
|
|
return 0
|
|
fi
|
|
|
|
_service:all:relations_cached > "${cache_file}.wip" || {
|
|
err-d "Failed to compute all relations."
|
|
return 1
|
|
}
|
|
|
|
mv "${cache_file}"{.wip,} || return 1
|
|
cat "${cache_file}"
|
|
}
|
|
|
|
_service:all:relations_hash_cached() {
|
|
if [ -z "$COMBINED_HASH" ]; then
|
|
err-d "Expected \$COMBINED_HASH to be set."
|
|
return 1
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.x${COMBINED_HASH}" \
|
|
hash
|
|
if [ -e "${cache_file}" ]; then
|
|
# debug "$FUNCNAME: SESSION cache hit $cache_file"
|
|
cat "${cache_file}"
|
|
return 0
|
|
fi
|
|
|
|
service:all:relations > "${cache_file}.pre" || {
|
|
err-d "Failed to get all relations."
|
|
return 1
|
|
}
|
|
{
|
|
p0 "$(hash_get < "${cache_file}.pre")" || return 1
|
|
cat "${cache_file}.pre"
|
|
rm "${cache_file}.pre"
|
|
} > "${cache_file}".wip || return 1
|
|
|
|
mv "${cache_file}"{.wip,} || return 1
|
|
cat "${cache_file}"
|
|
}
|
|
|
|
|
|
## Get all relations from all services in the current compose file.
|
|
## Sets GLOBAL_ALL_RELATIONS_HASH and returns all relations array.
|
|
service:all:set_relations_hash() {
|
|
if [ -n "$GLOBAL_ALL_RELATIONS" ]; then
|
|
if [ -z "$GLOBAL_ALL_RELATIONS_HASH" ]; then
|
|
err "Can't access global \$GLOBAL_ALL_RELATIONS_HASH"
|
|
echo " (despite \$GLOBAL_ALL_RELATIONS being set)" >&2
|
|
return 1
|
|
fi
|
|
return 0
|
|
fi
|
|
## sets COMPOSE_YML_CONTENT_HASH
|
|
_service:all:relations_hash_cached >/dev/null || return 1
|
|
{
|
|
read-0 GLOBAL_ALL_RELATIONS_HASH || return 1
|
|
export GLOBAL_ALL_RELATIONS_HASH
|
|
## transfer to statedir
|
|
export GLOBAL_ALL_RELATIONS="$CACHEDIR/$FUNCNAME.cache.$COMBINED_HASH"
|
|
cat > "$GLOBAL_ALL_RELATIONS"
|
|
} < <(_service:all:relations_hash_cached)
|
|
if [ -z "$GLOBAL_ALL_RELATIONS" ]; then
|
|
err "Failed to set \$GLOBAL_ALL_RELATIONS."
|
|
return 1
|
|
fi
|
|
if [ -z "$GLOBAL_ALL_RELATIONS_HASH" ]; then
|
|
err "Failed to set \$GLOBAL_ALL_RELATIONS_HASH."
|
|
return 1
|
|
fi
|
|
}
|
|
|
|
get_subset_relations () {
|
|
local service all_services services start
|
|
if [ -n "$SUBSET_ALL_RELATIONS" ]; then
|
|
return 0
|
|
fi
|
|
if [ -z "$GLOBAL_ALL_RELATIONS_HASH" ]; then
|
|
err-d "Can't access global \$GLOBAL_ALL_RELATIONS_HASH"
|
|
return 1
|
|
fi
|
|
cache_hash=$(H "$@" "$GLOBAL_ALL_RELATIONS_HASH" "$(declare -f "$FUNCNAME")")
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$cache_hash"
|
|
if [ -e "${cache_file}" ]; then
|
|
export SUBSET_ALL_RELATIONS="$cache_file"
|
|
hash=$(hash_get < "$cache_file") || return 1
|
|
export SUBSET_ALL_RELATIONS_HASH="$hash"
|
|
cat "${cache_file}"
|
|
return 0
|
|
fi
|
|
## collect all connected services first
|
|
all_services=("$@")
|
|
declare -A services
|
|
while [ "${#all_services[@]}" != 0 ]; do
|
|
array_pop all_services service
|
|
# debug " Getting relations for $DARKYELLOW$service$NORMAL"
|
|
while read-0 s rn ts rc td; do
|
|
[[ "$s" == "$service" ]] || continue
|
|
# debug " adding relation $DARKBLUE$rn$NORMAL to $DARKYELLOW$ts$NORMAL"
|
|
p0 "$service" "$rn" "$ts" "$rc" "$td"
|
|
if [ -z "${services[$ts]}" ] && [[ " ${all_services[@]} " != *" $ts "* ]]; then
|
|
all_services+=("$ts")
|
|
fi
|
|
done < "$GLOBAL_ALL_RELATIONS"
|
|
services["$service"]=1
|
|
done > "$cache_file.wip"
|
|
mv "$cache_file"{.wip,} || return 1
|
|
export SUBSET_ALL_RELATIONS="$cache_file"
|
|
hash=$(hash_get < "$cache_file") || return 1
|
|
export SUBSET_ALL_RELATIONS_HASH="$hash"
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_subset_relations
|
|
|
|
get_all_relations () {
|
|
if [ -z "$COMBINED_HASH" ]; then
|
|
err-d "Expected \$COMBINED_HASH to be set."
|
|
return 1
|
|
fi
|
|
if [ -n "$GLOBAL_ALL_RELATIONS" ]; then
|
|
cat "$GLOBAL_ALL_RELATIONS" || return 1
|
|
return 0
|
|
fi
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@" "$COMBINED_HASH" "$(declare -p without_relations)")" \
|
|
services all_services service services_uses services_provides \
|
|
changed summon required recommended optional
|
|
|
|
if [ -e "${cache_file}" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
export GLOBAL_ALL_RELATIONS="$cache_file"
|
|
cat "${cache_file}"
|
|
return 0
|
|
fi
|
|
|
|
declare -A services
|
|
services_uses=()
|
|
## XXXvlab: bwerk, leveraging cache to be able to get the errorlevel here.
|
|
_get_services_uses "$@" >/dev/null || return 1
|
|
array_read-0 services_uses < <(_get_services_uses "$@")
|
|
services_provides=()
|
|
## XXXvlab: bwerk, leveraging cache to be able to get the errorlevel here.
|
|
_get_services_provides "$@" >/dev/null || return 1
|
|
array_read-0 services_provides < <(_get_services_provides "$@")
|
|
|
|
for service in "$@"; do
|
|
services[$service]=1
|
|
done
|
|
|
|
all_services=("$@")
|
|
while [ "${#all_services[@]}" != 0 ]; do
|
|
array_pop all_services service
|
|
while read-0-err E relation_name ts relation_config tech_dep; do
|
|
[ "${without_relations[$service:$relation_name]}" ] && {
|
|
debug "Ignoring compose $DARKYELLOW$service$NORMAL --$DARKBLUE$relation_name$NORMAL--> ${DARKYELLOW}$ts$NORMAL"
|
|
continue
|
|
}
|
|
## First field is the priority; it can be adjusted in the second step
|
|
printf "%s\0" "" "$service" "$relation_name" "$ts" "$relation_config" "$tech_dep"
|
|
|
|
## adding target services ?
|
|
[ "${services[$ts]}" ] && continue
|
|
array_read-0 services_uses < <(_get_services_uses "$ts")
|
|
all_services+=("$ts")
|
|
services[$ts]=1
|
|
done < <(p-err get_compose_relations "$service")
|
|
if [ "$E" != 0 ]; then
|
|
err "Failed to get relations for $DARKYELLOW$service$NORMAL."
|
|
return 1
|
|
fi
|
|
done > "${cache_file}.wip"
|
|
|
|
while true; do
|
|
changed=
|
|
new_services_uses=()
|
|
summon=()
|
|
required=()
|
|
recommended=()
|
|
optional=()
|
|
while [ "${#services_uses[@]}" != 0 ]; do
|
|
service="${services_uses[0]}"
|
|
relation_name="${services_uses[1]}"
|
|
relation_def="${services_uses[2]}"
|
|
services_uses=("${services_uses[@]:3}")
|
|
|
|
[ "${without_relations[$service:$relation_name]}" ] && {
|
|
debug "Skipping $DARKYELLOW$service$NORMAL --$DARKBLUE$relation_name$NORMAL--> $DARKYELLOW*$NORMAL"
|
|
continue
|
|
}
|
|
|
|
default_options=$(printf "%s" "$relation_def" | shyaml -y get-value "default-options" 2>/dev/null)
|
|
after=$(_out_after_value_from_def "$service" "$relation_name" "$relation_def") || return 1
|
|
## is this "use" declaration satisfied ?
|
|
found=
|
|
while read-0 p s rn ts rc td; do
|
|
if [ -z "$found" -a "$service" == "$s" -a "$relation_name" == "$rn" ]; then
|
|
if [ "$default_options" ]; then
|
|
rc=$(merge_yaml_str "$default_options" "$rc") || return 1
|
|
fi
|
|
found="$ts"
|
|
p="$after"
|
|
fi
|
|
printf "%s\0" "$p" "$s" "$rn" "$ts" "$rc" "$td"
|
|
done < "${cache_file}.wip" > "${cache_file}.wip.new"
|
|
mv "${cache_file}.wip.new" "${cache_file}.wip"
|
|
if [ "$found" ]; then ## this "use" declaration was satisfied
|
|
debug "${DARKYELLOW}$service${NORMAL} use declaration for relation" \
|
|
"${DARKBLUE}$relation_name${NORMAL} is satisfied with ${DARKYELLOW}$found${NORMAL}"
|
|
continue
|
|
fi
|
|
|
|
auto=$(echo "$relation_def" | shyaml get-value auto pair 2>/dev/null)
|
|
auto=${auto:-pair}
|
|
case "$auto" in
|
|
"pair"|"summon")
|
|
service_list=()
|
|
array_read-0 service_list < <(array_keys_to_stdin services)
|
|
providers=()
|
|
providers_def=()
|
|
array_read-0 providers providers_def < <(_get_services_providing "$relation_name" "${service_list[@]}")
|
|
if [ "${#providers[@]}" == 1 ]; then
|
|
ts="${providers[0]}"
|
|
debug "Auto-pairs ${DARKYELLOW}$service${NORMAL}" \
|
|
"--${DARKBLUE}$relation_name${NORMAL}--> ${DARKYELLOW}$ts${NORMAL}"
|
|
|
|
_out_new_relation_from_defs "$service" "$relation_name" "$ts" \
|
|
"${providers_def[0]}" "$relation_def" \
|
|
>> "${cache_file}.wip" || return 1
|
|
|
|
## Adding service
|
|
[ "${services[$ts]}" ] && continue
|
|
array_read-0 new_services_uses < <(_get_services_uses "$ts")
|
|
services[$ts]=1
|
|
changed=1
|
|
continue
|
|
fi
|
|
if [ "${#providers[@]}" -gt 1 ]; then
|
|
msg=""
|
|
warn "No auto-pairing ${DARKYELLOW}$service${NORMAL}" \
|
|
"--${DARKBLUE}$relation_name${NORMAL}--> ($DARKYELLOW""${providers[@]}""$NORMAL)"\
|
|
"(> 1 provider)."
|
|
elif [ "$auto" == "summon" ]; then ## no provider
|
|
summon+=("$service" "$relation_name" "$relation_def")
|
|
fi
|
|
;;
|
|
null|disable|disabled)
|
|
:
|
|
;;
|
|
*)
|
|
err "Invalid ${WHITE}auto${NORMAL} value '$auto'."
|
|
return 1
|
|
;;
|
|
esac
|
|
constraint=$(echo "$relation_def" | shyaml get-value constraint 2>/dev/null)
|
|
constraint=${constraint:-optional}
|
|
case "$constraint" in
|
|
"required")
|
|
required+=("$service" "$relation_name" "$relation_def")
|
|
;;
|
|
"recommended")
|
|
recommended+=("$service" "$relation_name" "$relation_def")
|
|
;;
|
|
"optional")
|
|
optional+=("$service" "$relation_name" "$relation_def")
|
|
;;
|
|
*)
|
|
err "Invalid ${WHITE}constraint${NORMAL} value '$constraint'."
|
|
return 1
|
|
;;
|
|
esac
|
|
new_services_uses+=("$service" "$relation_name" "$relation_def") ## re-queue it
|
|
done
|
|
services_uses=("${new_services_uses[@]}")
|
|
|
|
if [ "$changed" ]; then
|
|
continue
|
|
fi
|
|
## situation is stable
|
|
|
|
if [ "${#summon[@]}" != 0 ]; then
|
|
declare -A summon_requeued=()
|
|
while [ "${#summon[@]}" != 0 ]; do
|
|
service="${summon[0]}"
|
|
relation_name="${summon[1]}"
|
|
relation_def="${summon[2]}"
|
|
summon=("${summon[@]:3}")
|
|
providers=()
|
|
providers_def=()
|
|
array_read-0 providers providers_def < <(_get_charm_providing "$relation_name" "${service_list[@]}")
|
|
|
|
## select first provider that is not a stub
|
|
new_providers=()
|
|
new_providers_def=()
|
|
while [[ "${#providers[@]}" != 0 ]]; do
|
|
provider="${providers[0]}"
|
|
provider_def="${providers_def[0]}"
|
|
providers=("${providers[@]:1}")
|
|
providers_def=("${providers_def[@]:1}")
|
|
type="$(get_service_type "$provider")" || true
|
|
[ "$type" == "stub" ] && continue
|
|
new_providers+=("$provider")
|
|
new_providers_def+=("$provider_def")
|
|
done
|
|
providers=("${new_providers[@]}")
|
|
providers_def=("${new_providers_def[@]}")
|
|
|
|
if [ "${#providers[@]}" == 0 ]; then
|
|
err "Summoning a ${DARKBLUE}$relation_name${NORMAL} provider failed: none were found in charm store."
|
|
return 1
|
|
fi
|
|
|
|
if [ "${#providers[@]}" -gt 1 ]; then
|
|
## If there are multiple providers (for instance
## sql-database), there are some cases where other
## services will also summon a more specific
## postgres-database, which will solve our
## constraint. So we'd rather pass (and requeue).
|
|
if [ -z "${summon_requeued[$service/$relation_name]}" ]; then
|
|
debug "Auto-summon ${DARKYELLOW}$service${NORMAL}" \
|
|
"--${DARKBLUE}$relation_name${NORMAL}--> ($DARKYELLOW""${providers[@]}""$NORMAL)"\
|
|
"(> 1 provider). Requeuing."
|
|
summon+=("$service" "$relation_name" "$relation_def") ## re-queue it
|
|
summon_requeued["$service/$relation_name"]=1
|
|
continue
|
|
else
|
|
warn "Auto-summon ${DARKYELLOW}$service${NORMAL}" \
|
|
"--${DARKBLUE}$relation_name${NORMAL}--> ($DARKYELLOW""${providers[@]}""$NORMAL)"\
|
|
"(> 1 provider). Choosing first."
|
|
fi
|
|
fi
|
|
ts="${providers[0]}"
|
|
|
|
## YYYvlab: should this be visible even when not in debug mode?
|
|
debug "Auto-summon ${DARKYELLOW}$service${NORMAL}" \
|
|
"--${DARKBLUE}$relation_name${NORMAL}--> ${DARKYELLOW}$ts${NORMAL}"
|
|
|
|
_out_new_relation_from_defs "$service" "$relation_name" "$ts" \
|
|
"${providers_def[0]}" "$relation_def" \
|
|
>> "${cache_file}.wip" || return 1
|
|
|
|
## Adding service
|
|
[ "${services[$ts]}" ] && continue
|
|
array_read-0 services_uses < <(_get_services_uses "$ts")
|
|
services[$ts]=1
|
|
changed=1
|
|
continue 2
|
|
done
|
|
continue
|
|
fi
|
|
[ "$NO_CONSTRAINT_CHECK" ] && break
|
|
if [ "${#required[@]}" != 0 ]; then
|
|
echo "$(_display_solves required)" | sed -r "s/^/${RED}||${NORMAL} /g" >&2
|
|
err "Required relations not satisfied"
|
|
return 1
|
|
fi
|
|
if [ "${#recommended[@]}" != 0 ]; then
|
|
## make recommendation
|
|
echo "$(_display_solves recommended)" | sed -r "s/^/${YELLOW}||${NORMAL} /g" >&2
|
|
fi
|
|
if [ -z "$QUIET" ]; then
|
|
if [ "${#optional[@]}" != 0 ]; then
|
|
## inform about options
|
|
echo "$(_display_solves optional)" | sed -r "s/^/${BLUE}||${NORMAL} /g" >&2
|
|
fi
|
|
fi
|
|
# if [ "${#required[@]}" != 0 ]; then
|
|
# err "Required relations not satisfied"
|
|
# return 1
|
|
# fi
|
|
if [ "${#recommended[@]}" != 0 ]; then
|
|
warn "Recommended relations not satisfied"
|
|
fi
|
|
break
|
|
done
|
|
if [ "$?" != 0 ]; then
|
|
rm -f "${cache_file}"{,.wip,.wip.new} ## no cache
|
|
return 1
|
|
fi
|
|
|
|
##
|
|
## Sort relations according to the 'after' constraints declared in the =metadata.yml= 'uses' sections.
|
|
##
|
|
|
|
mv "${cache_file}.wip"{,.in} &&
|
|
rm -f "${cache_file}.wip.final" &&
|
|
touch "${cache_file}.wip.final" || {
|
|
err "Unexpected error when mangling cache files."
|
|
return 1
|
|
}
|
|
declare -A relation_done=()
|
|
while true; do
|
|
had_remaining_relation=
|
|
had_new_relation=
|
|
while read-0 p s rn ts rc td; do
|
|
if [ -z "$p" ] || [ "$p" == "," ]; then
|
|
relation_done["$s:$rn"]=1
|
|
# printf " .. %-30s %-30s %-30s\n" "$s" "$ts" "$rn" >&2
|
|
printf "%s\0" "$s" "$rn" "$ts" "$rc" "$td" >> "${cache_file}.wip.final"
|
|
had_new_relation=1
|
|
else
|
|
# printf " !! %-30s %-30s %-30s\n" "$p" "$s" "$rn" >&2
|
|
printf "%s\0" "$p" "$s" "$rn" "$ts" "$rc" "$td" >> "${cache_file}.wip.out"
|
|
had_remaining_relation=1
|
|
fi
|
|
done < "${cache_file}.wip.in"
|
|
[ -z "$had_remaining_relation" ] && break
|
|
mv "${cache_file}.wip."{out,in}
|
|
while read-0 p s rn ts rc td; do
|
|
for rel in "${!relation_done[@]}"; do
|
|
p="${p//,$rel,/,}"
|
|
done
|
|
# printf " CC %-30s %-30s %-30s\n" "$p" "$s" "$rn" >&2
|
|
if [ -z "$had_new_relation" ]; then
|
|
err "${DARKYELLOW}$s${NORMAL} --${DARKBLUE}$rn${NORMAL}--> ${DARKYELLOW}$ts${NORMAL} missing required ${WHITE}after${NORMAL} relations:"
|
|
for rel in ${p//,/ }; do
|
|
rel_s=${rel%%:*}
|
|
rel_r=${rel##*:}
|
|
echo " - ${DARKYELLOW}$rel_s${NORMAL} --${DARKBLUE}$rel_r${NORMAL}--> ${DARKGRAY}*${NORMAL}" >&2
|
|
done
|
|
else
|
|
printf "%s\0" "$p" "$s" "$rn" "$ts" "$rc" "$td" >> "${cache_file}.wip.out"
|
|
fi
|
|
done < "${cache_file}.wip.in"
|
|
if [ -z "$had_new_relation" ]; then
|
|
rm -f "${cache_file}"{,.wip{,new,in,out,final}} ## no cache
|
|
return 1
|
|
fi
|
|
mv "${cache_file}.wip."{out,in}
|
|
done
|
|
|
|
mv "${cache_file}"{.wip.final,} || return 1
|
|
export GLOBAL_ALL_RELATIONS="$cache_file"
|
|
GLOBAL_ALL_RELATIONS_HASH=$(hash_get < "$cache_file") || return 1
|
|
export GLOBAL_ALL_RELATIONS_HASH
|
|
cat "$cache_file"
|
|
}
|
|
export -f get_all_relations
|
|
|
|
|
|
_display_solves() {
|
|
local array_name="$1" by_relation msg
|
|
## inform about options
|
|
msg=""
|
|
declare -A by_relation
|
|
while read-0 service relation_name relation_def; do
|
|
solves=$(printf "%s" "$relation_def" | shyaml -y get-value solves 2>/dev/null);
|
|
auto=$(printf "%s" "$relation_def" | shyaml get-value auto 2>/dev/null);
|
|
if [ -z "$solves" ]; then
|
|
continue
|
|
fi
|
|
by_relation[$relation_name]+=$(printf "\n %s" "${DARKYELLOW}$service$NORMAL for:")
|
|
if [ "$auto" == "pair" ]; then
|
|
requirement="add provider in cluster to auto-pair"
|
|
else
|
|
requirement="add explicit relation"
|
|
fi
|
|
while read-0 name def; do
|
|
by_relation[$relation_name]+=$(printf "\n - ${DARKCYAN}%-15s${NORMAL} %s (%s)" "$name" "$def" "$requirement")
|
|
done < <(printf "%s" "$solves" | shyaml key-values-0)
|
|
done < <(array_values_to_stdin "$array_name")
|
|
|
|
while read-0 relation_name message; do
|
|
msg+="$(printf "\n${DARKBLUE}%s$NORMAL provider is $array_name by%s" \
|
|
"$relation_name" "$message" )"
|
|
done < <(array_kv_to_stdin by_relation)
|
|
|
|
if [ "$msg" ]; then
|
|
printf "%s\n" "${msg:1}"
|
|
fi
|
|
}
|
|
|
|
|
|
get_compose_relation_def() {
|
|
local service="$1" relation="$2" relation_name target_service relation_config tech_dep
|
|
while read-0 relation_name target_service relation_config tech_dep; do
|
|
[ "$relation_name" == "$relation" ] || continue
|
|
printf "%s\0%s\0%s\0" "$target_service" "$relation_config" "$tech_dep"
|
|
done < <(get_compose_relations "$service") || return 1
|
|
}
|
|
export -f get_compose_relation_def
|
|
|
|
|
|
run_service_relations () {
|
|
local service services loaded subservices subservice
|
|
|
|
PROJECT_NAME=$(get_default_project_name) || return 1
|
|
|
|
export PROJECT_NAME
|
|
|
|
declare -A loaded
|
|
subservices=$(get_ordered_service_dependencies "$@") || return 1
|
|
for service in $subservices; do
|
|
|
|
# debug "Upping dep's relations of ${DARKYELLOW}$service${NORMAL}:"
|
|
for subservice in $(get_service_deps "$service") "$service"; do
|
|
[ "${loaded[$subservice]}" ] && continue
|
|
|
|
export BASE_SERVICE_NAME=$service
|
|
MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$subservice") || return 1
|
|
MASTER_BASE_CHARM_NAME=$(get_service_charm "$MASTER_BASE_SERVICE_NAME") || return 1
|
|
RELATION_BASE_COMPOSE_DEF=$(get_compose_service_def "$subservice") || return 1
|
|
export RELATION_BASE_COMPOSE_DEF MASTER_BASE_{CHARM,SERVICE}_NAME
|
|
|
|
# debug " Relations of ${DARKYELLOW}$subservice${NORMAL}:"
|
|
while read-0 relation_name target_service relation_config tech_dep; do
|
|
[ "${without_relations[$service:$relation_name]}" ] && {
|
|
debug "Skipping $DARKYELLOW$service$NORMAL --$DARKBLUE$relation_name$NORMAL--> $DARKYELLOW*$NORMAL"
|
|
continue
|
|
}
|
|
|
|
export relation_config
|
|
export TARGET_SERVICE_NAME=$target_service
|
|
MASTER_TARGET_SERVICE_NAME=$(get_top_master_service_for_service "$target_service") || return 1
|
|
MASTER_TARGET_CHARM_NAME=$(get_service_charm "$MASTER_TARGET_SERVICE_NAME") || return 1
|
|
RELATION_TARGET_COMPOSE_DEF=$(get_compose_service_def "$target_service") || return 1
|
|
export RELATION_TARGET_COMPOSE_DEF MASTER_TARGET_{CHARM,SERVICE}_NAME
|
|
|
|
Wrap "${wrap_opts[@]}" -d "building $DARKYELLOW$subservice$NORMAL --$DARKBLUE$relation_name$NORMAL--> $DARKYELLOW$target_service$NORMAL" <<EOF || return 1
|
|
_run_service_relation "$relation_name" "$subservice" "$target_service" "\$relation_config"
|
|
EOF
|
|
done < <(get_service_relations "$subservice") || return 1
|
|
loaded[$subservice]=1
|
|
done
|
|
done
|
|
}
|
|
export -f run_service_relations
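
## Sketch of the environment prepared for each relation script run above
## (variable names taken from the exports in run_service_relations):
##   BASE_SERVICE_NAME, MASTER_BASE_SERVICE_NAME, MASTER_BASE_CHARM_NAME and
##   RELATION_BASE_COMPOSE_DEF describe the base side, while
##   TARGET_SERVICE_NAME, MASTER_TARGET_SERVICE_NAME, MASTER_TARGET_CHARM_NAME,
##   RELATION_TARGET_COMPOSE_DEF and relation_config describe the target side.
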
_run_service_action_direct() {
|
|
local service="$1" action="$2" charm _dummy project_name
|
|
shift; shift
|
|
|
|
read-0 charm action_script_path || true ## against 'set -e' that could be setup in parent scripts
|
|
|
|
if read-0 _dummy || [ "$_dummy" ]; then
|
|
print_syntax_error "$FUNCNAME: too many arguments in action descriptor"
|
|
return 1
|
|
fi
|
|
|
|
project_name=$(get_default_project_name) || return 1
|
|
export PROJECT_NAME="$project_name"
|
|
|
|
export state_tmpdir
|
|
(
|
|
set +e ## Prevents unwanted leaks from parent shell
|
|
export COMPOSE_CONFIG=$(get_compose_yml_content)
|
|
export METADATA_CONFIG=$(charm.metadata "$charm")
|
|
export SERVICE_NAME=$service
|
|
export ACTION_NAME=$action
|
|
export ACTION_SCRIPT_PATH="$action_script_path"
|
|
export CONTAINER_NAME=$(get_top_master_service_for_service "$service")
|
|
export DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$CONTAINER_NAME")
|
|
export SERVICE_DATASTORE="$DATASTORE/$service"
|
|
export SERVICE_CONFIGSTORE="$CONFIGSTORE/$service"
|
|
exname="$exname $ACTION_NAME $SERVICE_NAME" \
|
|
stdbuf -oL -eL bash -c 'charm.run_direct_action "$@"' -- "$charm" "$action" "$@"
|
|
) 0<&6 ## inject general stdin
|
|
}
|
|
export -f _run_service_action_direct
|
|
|
|
|
|
_run_service_action_relation() {
|
|
local service="$1" action="$2" charm target_charm relation_name relation_config _dummy
|
|
shift; shift
|
|
|
|
read-0 charm target_service target_charm relation_name relation_config action_script_path || true
|
|
|
|
if read-0 _dummy || [ "$_dummy" ]; then
|
|
print_syntax_error "$FUNCNAME: too many arguments in action descriptor"
|
|
return 1
|
|
fi
|
|
RELATION_DATA_FILE=$(get_relation_data_file "$service" "$target_service" "$relation_name" "$relation_config") || return 1
|
|
|
|
export action_errlvl_file="$state_tmpdir/action-$service-$charm-$action-errlvl"
|
|
export state_tmpdir
|
|
(
|
|
set +e ## Prevents unwanted leaks from parent shell
|
|
export METADATA_CONFIG=$(charm.metadata "$charm")
|
|
export SERVICE_NAME=$service
|
|
export RELATION_TARGET_SERVICE="$target_service"
|
|
export RELATION_TARGET_CHARM="$target_charm"
|
|
export RELATION_BASE_SERVICE="$service"
|
|
export RELATION_BASE_CHARM="$charm"
|
|
export RELATION_DATA_FILE="$RELATION_DATA_FILE"
|
|
export ACTION_NAME=$action
|
|
export ACTION_SCRIPT_PATH="$action_script_path"
|
|
export CONTAINER_NAME=$(get_top_master_service_for_service "$service")
|
|
export DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$CONTAINER_NAME")
|
|
export SERVICE_DATASTORE="$DATASTORE/$service"
|
|
export SERVICE_CONFIGSTORE="$CONFIGSTORE/$service"
|
|
exname="$exname $ACTION_NAME $SERVICE_NAME" \
|
|
stdbuf -oL -eL bash -c 'charm.run_relation_action "$@"' -- "$target_charm" "$relation_name" "$action" "$@"
|
|
) 0<&6 ## inject general stdin
|
|
}
|
|
export -f _run_service_action_relation
|
|
|
|
|
|
get_relation_data_dir() {
|
|
local service="$1" target_service="$2" relation_name="$3" \
|
|
cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)"
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
local project relation_dir
|
|
project=${PROJECT_NAME}
|
|
if [ -z "$project" ]; then
|
|
project=$(get_default_project_name) || return 1
|
|
fi
|
|
relation_dir="$VARDIR/relations/$project/${service}-${target_service}/$relation_name"
|
|
if ! [ -d "$relation_dir" ]; then
|
|
mkdir -p "$relation_dir" || return 1
|
|
chmod go-rwx "$relation_dir" || return 1 ## protecting this directory
|
|
fi
|
|
echo "$relation_dir" | tee "$cache_file"
|
|
}
|
|
export -f get_relation_data_dir
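
## Illustration (hypothetical names): `get_relation_data_dir web db mysql-database`
## creates (owner-only, go-rwx) and prints a directory such as:
##   $VARDIR/relations/$PROJECT_NAME/web-db/mysql-database
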
get_relation_data_file() {
|
|
local service="$1" target_service="$2" relation_name="$3" relation_config="$4" \
|
|
new new_md5 relation_dir relation_data_file
|
|
|
|
relation_dir=$(get_relation_data_dir "$service" "$target_service" "$relation_name") || return 1
|
|
relation_data_file="$relation_dir/data"
|
|
|
|
new=
|
|
if [ -e "$relation_data_file" ]; then
|
|
## Has reference changed ?
|
|
new_md5=$(e "$relation_config" | md5_compat)
|
|
if [ "$new_md5" != "$(cat "$relation_data_file.md5_ref" 2>/dev/null)" ]; then
|
|
new=true
|
|
fi
|
|
else
|
|
new=true
|
|
fi
|
|
|
|
if [ -n "$new" ]; then
|
|
OLDUMASK=$(umask)
|
|
umask 0077
|
|
e "$relation_config" > "$relation_data_file"
|
|
umask "$OLDUMASK"
|
|
e "$relation_config" | md5_compat > "$relation_data_file.md5_ref"
|
|
fi
|
|
echo "$relation_data_file"
|
|
}
|
|
export -f get_relation_data_file
|
|
|
|
|
|
has_service_action () {
|
|
if [ -z "$CHARM_STORE_HASH" ]; then
|
|
err-d "Can't access global \$CHARM_STORE_HASH"
|
|
return 1
|
|
fi
|
|
local service="$1" action="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$2.$CHARM_STORE_HASH" \
|
|
charm target_charm relation_name target_service relation_config _tech_dep \
|
|
path
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
if [ -s "$cache_file" ]; then
|
|
cat "$cache_file"
|
|
return 0
|
|
else
|
|
return 1
|
|
fi
|
|
fi
|
|
|
|
charm=$(get_service_charm "$service") || return 1
|
|
|
|
## Action directly provided ?
|
|
|
|
if path=$(charm.has_direct_action "$charm" "$action"); then
|
|
p0 "direct" "$charm" "$path" | tee "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
## Action provided by relation ?
|
|
|
|
while read-0 relation_name target_service relation_config _tech_dep; do
|
|
target_charm=$(get_service_charm "$target_service") || return 1
|
|
if path=$(charm.has_relation_action "$target_charm" "$relation_name" "$action"); then
|
|
p0 "relation" "$charm" "$target_service" "$target_charm" "$relation_name" "$relation_config" "$path" | tee "$cache_file"
|
|
return 0
|
|
fi
|
|
done < <(get_service_relations "$service")
|
|
|
|
touch "$cache_file"
|
|
return 1
|
|
# master=$(get_top_master_service_for_service "$service")
|
|
# [ "$master" == "$charm" ] && return 1
|
|
|
|
# has_service_action "$master" "$action"
|
|
}
|
|
export -f has_service_action
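
## On success the function above prints one NUL-separated descriptor, later
## consumed by run_service_action:
##   "direct"   CHARM ACTION_SCRIPT_PATH
##   "relation" CHARM TARGET_SERVICE TARGET_CHARM RELATION_NAME RELATION_CONFIG ACTION_SCRIPT_PATH
## A negative answer is cached as an empty file.
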
run_service_action () {
|
|
local service="$1" action="$2" errlvl
|
|
shift ; shift
|
|
exec 6<&0 ## saving stdin
|
|
{
|
|
if ! read-0 action_type; then
|
|
info "Service $DARKYELLOW$service$NORMAL does not have any action $DARKCYAN$action$NORMAL defined."
|
|
info " Add an executable script to 'actions/$action' to implement action."
|
|
return 1
|
|
fi
|
|
|
|
"_run_service_action_${action_type}" "$service" "$action" "$@"
|
|
errlvl="$?"
|
|
|
|
} < <(has_service_action "$service" "$action")
|
|
|
|
exec 0<&6 6<&- ## restoring stdin
|
|
return "$errlvl"
|
|
}
|
|
export -f run_service_action
|
|
|
|
|
|
get_compose_relation_config() {
|
|
local service=$1 relation_config cache_file="$state_tmpdir/$FUNCNAME.cache.$1"
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
compose_service_def=$(get_compose_service_def "$service") || return 1
|
|
|
|
echo "$compose_service_def" | shyaml get-value "relations" 2>/dev/null | tee "$cache_file"
|
|
}
|
|
export -f get_compose_relation_config
|
|
|
|
|
|
# ## Return key-values-0
|
|
# get_compose_relation_config_for_service() {
|
|
# local service=$1 relation_name=$2 relation_config
|
|
# compose_service_relations=$(get_compose_relation_config "$service") || return 1
|
|
# if ! relation_config=$(
|
|
# echo "$compose_service_relations" |
|
|
# shyaml get-value "${relation_name}" 2>/dev/null); then
|
|
# err "Couldn't find $DARKYELLOW${service}$NORMAL/${WHITE}${relation_name}$NORMAL" \
|
|
# "relation config in compose configuration."
|
|
# return 1
|
|
# fi
|
|
# if [ -z "$relation_config" ]; then
|
|
# err "Relation ${WHITE}mysql-database$NORMAL is empty in compose configuration."
|
|
# return 1
|
|
# fi
|
|
# if ! echo "$relation_config" | shyaml key-values-0 2>/dev/null; then
|
|
# err "No key/values in ${DARKBLUE}mysql-database$NORMAL of compose config."
|
|
# return 1
|
|
# fi
|
|
# }
|
|
# export -f get_compose_relation_config_for_service
|
|
|
|
|
|
_get_container_relation() {
|
|
local metadata=$1 found relation_name relation_def
|
|
|
|
found=
|
|
while read-0 relation_name relation_def; do
|
|
[ "$(echo "$relation_def" | shyaml get-value "scope" 2>/dev/null)" == "container" ] && {
|
|
found="$relation_name"
|
|
break
|
|
}
|
|
done < <(_get_charm_metadata_uses "$metadata")
|
|
if [ -z "$found" ]; then
|
|
err "Charm $DARKPINK$charm$NORMAL is a subordinate but does not have any required relation declaration with" \
|
|
"${WHITE}scope${NORMAL} set to 'container'."
|
|
return 1
|
|
fi
|
|
printf "%s" "$found"
|
|
}
|
|
|
|
|
|
_get_master_service_for_service_cached () {
|
|
local service="$1" charm="$2" metadata="$3" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)" \
|
|
requires master_charm target_charm target_service service_def found
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: STATIC cache hit ($1)"
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
if ! [[ "$(echo "$metadata" | shyaml get-value "subordinate" 2>/dev/null)" =~ ^True|true$ ]]; then
|
|
## just return service name
|
|
echo "$service" | tee "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
## Action provided by relation ?
|
|
container_relation=$(_get_container_relation "$metadata") || return 1
|
|
read-0 target_service _ _ < <(get_service_relation "$service" "$container_relation")
|
|
if [ -z "$target_service" ]; then
|
|
err "Couldn't find ${WHITE}relations.${container_relation}${NORMAL} in" \
|
|
"${DARKYELLOW}$service$NORMAL compose definition."
|
|
err "${FUNCNAME[*]}"
|
|
return 1
|
|
fi
|
|
echo "$target_service" | tee "$cache_file"
|
|
}
|
|
export -f _get_master_service_for_service_cached
|
|
|
|
|
|
get_master_service_for_service() {
|
|
if [ -z "$CHARM_STORE_HASH" ]; then
|
|
err-d "Expected \$CHARM_STORE_HASH to be set."
|
|
return 1
|
|
fi
|
|
local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$CHARM_STORE_HASH" \
|
|
charm metadata result
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: SESSION cache hit ($*)"
|
|
cat "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
charm=$(get_service_charm "$service") || return 1
|
|
metadata=$(charm.metadata "$charm" 2>/dev/null) || {
|
|
metadata=""
|
|
warn "No charm $DARKPINK$charm$NORMAL found."
|
|
}
|
|
result=$(_get_master_service_for_service_cached "$service" "$charm" "$metadata") || return 1
|
|
echo "$result" | tee "$cache_file" || return 1
|
|
}
|
|
export -f get_master_service_for_service
|
|
|
|
|
|
get_top_master_service_for_service() {
|
|
if [ -z "$CHARM_STORE_HASH" ]; then
|
|
err-d "Expected \$CHARM_STORE_HASH to be set."
|
|
return 1
|
|
fi
|
|
local service="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$1.$CHARM_STORE_HASH" \
|
|
current_service
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
# debug "$FUNCNAME: cache hit ($*)"
|
|
touch "$cache_file" || return 1
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
current_service="$service"
|
|
while true; do
|
|
master_service=$(get_master_service_for_service "$current_service") || return 1
|
|
[ "$master_service" == "$current_service" ] && break
|
|
current_service="$master_service"
|
|
done
|
|
echo "$current_service" | tee "$cache_file"
|
|
return 0
|
|
}
|
|
export -f get_top_master_service_for_service
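
## Illustration (hypothetical services): with a subordinate 'logrotate' whose
## container-scoped relation points to 'apache', and 'apache' being a regular
## (non-subordinate) service:
##   get_top_master_service_for_service logrotate  => apache
##   get_top_master_service_for_service apache     => apache
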
##
|
|
## The result is a mixin that is not always a complete valid
## docker-compose entry (think of subordinates). The result
## will be merged with the master charm's entry.
|
|
_get_docker_compose_mixin_from_metadata_cached() {
|
|
local service="$1" charm="$2" metadata="$3" \
|
|
has_build_dir="$4" \
|
|
cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)" \
|
|
metadata_file volumes docker_compose subordinate image \
|
|
mixin mixins tmemory memory limit docker_memory
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: STATIC cache hit $1"
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
mixins=("$(echo -en "labels:\n- compose.charm=$charm")")
|
|
if [ "$metadata" ]; then
|
|
## resources to volumes
|
|
volumes=$(
|
|
for resource_type in data config; do
|
|
while read-0 resource; do
|
|
eval "echo \" - \$HOST_${resource_type^^}STORE/\$service\$resource:\$resource:rw\""
|
|
done < <(echo "$metadata" | shyaml get-values-0 "${resource_type}-resources" 2>/dev/null)
|
|
done
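## Accepted 'host-resources' forms handled below (illustrative values):
##   /host/path:/container/path:MODE   ## used as-is
##   /host/path:/container/path        ## ':rw' is appended
##   /path:MODE                        ## same path on both sides, given mode
##   /path                             ## same path on both sides, ':rw'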
while read-0 resource; do
|
|
if [[ "$resource" == /*:/*:* ]]; then
|
|
echo " - $resource"
|
|
elif [[ "$resource" == /*:/* ]]; then
|
|
echo " - $resource:rw"
|
|
elif [[ "$resource" == /*:* ]]; then
|
|
echo " - ${resource%%:*}:$resource"
|
|
elif [[ "$resource" =~ ^/[^:]+$ ]]; then
|
|
echo " - $resource:$resource:rw"
|
|
else
|
|
die "Invalid host-resource specified in 'metadata.yml'."
|
|
fi
|
|
done < <(printf "%s" "$metadata" | shyaml get-values-0 "host-resources" 2>/dev/null)
|
|
while read-0 resource; do
|
|
dest="$(charm.get_dir "$charm")/resources$resource"
|
|
if ! [ -e "$dest" ]; then
|
|
die "charm-resource: '$resource' does not exist (file: '$dest')."
|
|
fi
|
|
echo " - $dest:$resource:ro"
|
|
done < <(echo "$metadata" | shyaml get-values-0 "charm-resources" 2>/dev/null)
|
|
) || return 1
|
|
if [ "$volumes" ]; then
|
|
mixins+=("volumes:"$'\n'"$volumes")
|
|
fi
|
|
|
|
type="$(printf "%s" "$metadata" | shyaml get-value type 2>/dev/null)" || true
|
|
if [ "$type" != "run-once" ]; then
|
|
mixins+=("restart: unless-stopped")
|
|
fi
|
|
|
|
docker_compose=$(printf "%s" "$metadata" | shyaml get-value -y "docker-compose" 2>/dev/null) || true
|
|
if [ "$docker_compose" ]; then
|
|
mixins+=("$docker_compose")
|
|
fi
|
|
|
|
if [[ "$(echo "$metadata" | shyaml get-value "subordinate" 2>/dev/null)" =~ ^True|true$ ]]; then
|
|
subordinate=true
|
|
fi
|
|
fi
|
|
|
|
image=$(echo "$metadata" | shyaml get-value "docker-image" 2>/dev/null)
|
|
[ "$image" == "None" ] && image=""
|
|
if [ -n "$image" ]; then
|
|
if [ -n "$subordinate" ]; then
|
|
err "Subordinate charm can not have a ${WHITE}docker-image${NORMAL} value."
|
|
return 1
|
|
fi
|
|
mixins+=("image: $image")
|
|
elif [ "$has_build_dir" ]; then
|
|
if [ "$subordinate" ]; then
|
|
err "Subordinate charm can not have a 'build' sub directory."
|
|
return 1
|
|
fi
|
|
mixins+=("build: $(charm.get_dir "$charm")/build")
|
|
fi
|
|
limit=$(e "$metadata" | yq '.limit' 2>/dev/null) || return 1
|
|
[ "$limit" == "null" ] && limit=""
if [ -n "$limit" ]; then
|
|
if ! read-0-err E tmemory memory < <(e "$limit" | wyq ".memory | type, .memory") ||
|
|
[ "$E" != 0 ]; then
|
|
err "Unexpected error in ${DARKPINK}$charm${NORMAL}'s metadata when parsing ${WHITE}.limit${NORMAL}"
|
|
return 1
|
|
fi
|
|
case "$tmemory" in
|
|
'!!str'|'!!int')
|
|
docker_memory="$(e "$memory" | numfmt --from iec)" || {
|
|
err "Invalid format specified for .limit.memory: '$memory'."
|
|
return 1
|
|
}
|
|
;;
|
|
'!!float')
|
|
err "Unexpected value in ${DARKPINK}$charm${NORMAL}'s metadata for ${WHITE}.limit.memory${NORMAL}."
|
|
echo " You need to specify a unit (like 'K', 'M', 'G' ...)." >&2
|
|
return 1
|
|
;;
|
|
'!!null')
|
|
:
|
|
;;
|
|
*)
|
|
err "Unexpected type '${tmemory#!!}' in ${DARKPINK}$charm${NORMAL}'s metadata" \
|
|
"for ${WHITE}.limit.memory${NORMAL}."
|
|
echo " You need to check ${DARKPINK}$charm${NORMAL}'s metadata " \
|
|
"for ${WHITE}.limit.memory${NORMAL} and provide a valid value" >&2
|
|
echo " Example values: '1.5G', '252M', ..." >&2
|
|
return 1
|
|
;;
|
|
esac
|
|
if [ -n "$docker_memory" ]; then
|
|
if [[ "$docker_memory" -lt 6291456 ]]; then
|
|
err "Can't limit service to lower than 6M."
|
|
echo " Specified limit of $memory (=$docker_memory) is lower than docker's min limit of 6M (=6291456)." >&2
|
|
echo " The provided limit to memory is lower than minimum memory for a container." >&2
|
|
echo " Please remove memory limit in ${DARKPINK}$charm${NORMAL}'s metadata or raise it." >&2
|
|
return 1
|
|
fi
|
|
mixins+=(
|
|
"mem_limit: $docker_memory"
|
|
"memswap_limit: $docker_memory"
|
|
)
|
|
fi
|
|
fi
|
|
## Final merging
|
|
|
|
mixin=$(merge_yaml_str "${mixins[@]}") || {
|
|
err "Failed to merge mixins from ${DARKPINK}${charm}${NORMAL} metadata."
|
|
return 1
|
|
}
|
|
echo "$mixin" | tee "$cache_file"
|
|
}
|
|
export -f _get_docker_compose_mixin_from_metadata_cached
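
## Rough illustration (hypothetical charm metadata.yml):
##   docker-image: postgres
##   data-resources:
##     - /var/lib/postgresql/data
##   limit:
##     memory: 256M
## would yield a mixin along the lines of (placeholders in angle brackets):
##   labels:
##   - compose.charm=<charm>
##   volumes:
##   - <HOST_DATASTORE>/<service>/var/lib/postgresql/data:/var/lib/postgresql/data:rw
##   restart: unless-stopped
##   image: postgres
##   mem_limit: 268435456
##   memswap_limit: 268435456
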
get_docker_compose_mixin_from_metadata() {
|
|
local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1"
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit ($*)"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
charm=$(get_service_charm "$service") || return 1
|
|
metadata="$(charm.metadata "$charm" 2>/dev/null)" || return 1
|
|
has_build_dir=
|
|
[ -d "$(charm.get_dir "$charm")/build" ] && has_build_dir=true
|
|
mixin=$(_get_docker_compose_mixin_from_metadata_cached "$service" "$charm" "$metadata" "$has_build_dir") || return 1
|
|
echo "$mixin" | tee "$cache_file"
|
|
}
|
|
export -f get_docker_compose_mixin_from_metadata
|
|
|
|
|
|
_save() {
|
|
local name="$1"
|
|
cat - | tee -a "$docker_compose_dir/.data/$name"
|
|
}
|
|
export -f _save
|
|
|
|
|
|
get_default_project_name() {
|
|
if [ -n "$DEFAULT_PROJECT_NAME" ]; then
|
|
echo "$DEFAULT_PROJECT_NAME"
|
|
return 0
|
|
fi
|
|
local normalized_path compose_yml_location name
|
|
compose_yml_location="$(get_compose_yml_location)" || return 1
|
|
if [ -n "$compose_yml_location" ]; then
|
|
if normalized_path=$(readlink -f "$compose_yml_location"); then
|
|
name="${normalized_path%/*}" ## dirname
|
|
name="${name##*/}" ## basename
|
|
name="${name%%-deploy}" ## remove any '-deploy'
|
|
name="${name,,}" ## lowercase
|
|
e "$name"
|
|
return 0
|
|
fi
|
|
fi
|
|
echo "orphan"
|
|
return 0
|
|
}
|
|
export -f get_default_project_name
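
## Example: unless $DEFAULT_PROJECT_NAME is set, a compose file located at
## '/srv/MyApp-deploy/compose.yml' (a made-up path) gives the project name
## 'myapp': the containing directory name, with a trailing '-deploy' stripped,
## lowercased. Without any compose file the fallback name is 'orphan'.
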
get_running_compose_containers() {
|
|
## XXXvlab: docker bug: there will be a final newline anyway
|
|
docker ps --filter label="compose.service" --format='{{.ID}}'
|
|
}
|
|
export -f get_running_compose_containers
|
|
|
|
|
|
get_healthy_container_ip_for_service () {
|
|
local service="$1" port="$2" timeout=${3:-60}
|
|
|
|
local containers container container_network container_ip
|
|
|
|
containers="$(get_running_containers_for_service "$service")"
|
|
|
|
if [ -z "$containers" ]; then
|
|
err "No containers running for service $DARKYELLOW$service$NORMAL."
|
|
return 1
|
|
fi
|
|
|
|
## XXXvlab: taking first container is probably not a good idea
|
|
container="$(echo "$containers" | head -n 1)"
|
|
|
|
## XXXvlab: taking first ip is probably not a good idea
|
|
read-0 container_network container_ip < <(get_container_network_ip "$container")
|
|
|
|
if [ -z "$container_ip" ]; then
|
|
err "Can't get container's IP. You should check health of" \
|
|
"${DARKYELLOW}$service${NORMAL}'s container."
|
|
return 1
|
|
fi
|
|
|
|
wait_for_tcp_port "$container_network" "$container_ip:$port" "$timeout" || {
|
|
err "TCP port of ${DARKYELLOW}$service${NORMAL}'s container doesn't seem open"
|
|
echo " Please check that container is healthy. Here are last logs:" >&2
|
|
docker logs "$container" --tail=10 | prefix " ${GRAY}|${NORMAL} " >&2
|
|
return 1
|
|
}
|
|
info "Host/Port ${container_ip}:${port} checked ${GREEN}open${NORMAL}."
|
|
echo "$container_network:$container_ip"
|
|
}
|
|
export -f get_healthy_container_ip_for_service
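
## On success the function above prints 'NETWORK:IP' for the first running
## container of the service, once the given TCP port answered within the
## timeout (60s by default).
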
switch_to_relation_service() {
|
|
local relation="$1"
|
|
|
|
## XXXvlab: can't get real config here
|
|
if ! read-0 ts _ _ < <(get_service_relation "$SERVICE_NAME" "$relation"); then
|
|
err "Couldn't find relation ${DARKCYAN}$relation${NORMAL}."
|
|
return 1
|
|
fi
|
|
|
|
export SERVICE_NAME="$ts"
|
|
export SERVICE_DATASTORE="$DATASTORE/$SERVICE_NAME"
|
|
DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$SERVICE_NAME")
|
|
export DOCKER_BASE_IMAGE
|
|
|
|
target_charm=$(get_service_charm "$ts") || return 1
|
|
target_charm_path=$(charm.get_dir "$target_charm") || return 1
|
|
|
|
cd "$target_charm_path"
|
|
|
|
}
|
|
export -f switch_to_relation_service
|
|
|
|
|
|
get_volumes_for_container() {
|
|
local container="$1"
|
|
|
|
docker inspect \
|
|
--format '{{range $mount := .Mounts}}{{$mount.Source}}{{"\x00"}}{{$mount.Destination}}{{"\x00"}}{{end}}' \
|
|
"$container"
|
|
}
|
|
export -f get_volumes_for_container
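
## The docker inspect format above yields NUL-separated SOURCE/DESTINATION
## pairs, one pair per mount of the given container (e.g., hypothetically,
## '/srv/datastore/foo' followed by '/var/lib/foo').
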
is_volume_used() {
|
|
local volume="$1" container_id src dst
|
|
|
|
while read -r container_id; do
|
|
while read-0 src dst; do
|
|
[[ "$src/" == "$volume"/* ]] && return 0
|
|
done < <(get_volumes_for_container "$container_id")
|
|
done < <(get_running_compose_containers)
|
|
return 1
|
|
}
|
|
export -f is_volume_used
|
|
|
|
|
|
clean_unused_docker_compose() {
|
|
for f in /var/lib/compose/docker-compose/*; do
|
|
[ -e "$f" ] || continue
|
|
is_volume_used "$f" && continue
|
|
debug "Cleaning unused docker-compose ${f##*/}"
|
|
rm -rf "$f" || return 1
|
|
done
|
|
return 0
|
|
}
|
|
export -f clean_unused_docker_compose
|
|
|
|
|
|
docker_compose_store() {
|
|
local file="$1" sha
|
|
|
|
sha=$(hash_get 64 < "$file") || return 1
|
|
project=$(get_default_project_name) || return 1
|
|
dst="/var/lib/compose/docker-compose/$sha/$project"
|
|
mkdir -p "$dst" || return 1
|
|
cat <<EOF > "$dst/.env" || return 1
|
|
DOCKER_COMPOSE_PATH=$dst
|
|
COMPOSE_HTTP_TIMEOUT=7200
|
|
EOF
|
|
cp "$file" "$dst/docker-compose.yml" || return 1
|
|
|
|
mkdir -p "$dst/bin" || return 1
|
|
|
|
cat <<EOF > "$dst/bin/dc" || return 1
|
|
#!/bin/bash
|
|
|
|
$(declare -f read-0)
|
|
|
|
docker_run_opts=()
|
|
|
|
while read-0 opt; do
|
|
if [[ "\$opt" == "!env:"* ]]; then
|
|
opt="\${opt##!env:}"
|
|
var="\${opt%%=*}"
|
|
value="\${opt#*=}"
|
|
export "\$var"="\$value"
|
|
else
|
|
docker_run_opts+=("\$opt")
|
|
fi
|
|
done < <(cat "$COMPOSE_LAUNCHER_OPTS")
|
|
|
|
docker_run_opts+=(
|
|
"-w" "$dst"
|
|
"--entrypoint" "/usr/local/bin/docker-compose"
|
|
)
|
|
[ -t 1 ] && {
|
|
docker_run_opts+=("-ti")
|
|
}
|
|
|
|
exec docker run --rm "\${docker_run_opts[@]}" "\${COMPOSE_DOCKER_IMAGE:-docker.0k.io/compose}" "\$@"
|
|
|
|
EOF
|
|
|
|
chmod +x "$dst/bin/dc" || return 1
|
|
|
|
printf "%s" "$sha"
|
|
}
|
|
export -f docker_compose_store
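
## Resulting on-disk layout (sketch), with $sha the content hash of the
## generated compose file and $project the current project name:
##   /var/lib/compose/docker-compose/$sha/$project/
##     .env                 ## DOCKER_COMPOSE_PATH and COMPOSE_HTTP_TIMEOUT
##     docker-compose.yml   ## verbatim copy of the generated file
##     bin/dc               ## helper running docker-compose in a container
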
launch_docker_compose() {
|
|
local charm docker_compose_tmpdir docker_compose_dir
|
|
docker_compose_tmpdir=$(mktemp -d -t tmp.XXXXXXXXXX)
|
|
#debug "Creating temporary docker-compose directory in '$docker_compose_tmpdir'."
|
|
trap_add EXIT "rm -rf \"$docker_compose_tmpdir\""
|
|
|
|
## docker-compose will name network from the parent dir name
|
|
project=$(get_default_project_name)
|
|
mkdir -p "$docker_compose_tmpdir/$project"
|
|
docker_compose_dir="$docker_compose_tmpdir/$project"
|
|
|
|
if [ -z "$_CURRENT_DOCKER_COMPOSE" ]; then
|
|
err "${FUNCNAME[0]} is meant to be called after"\
|
|
"\$_CURRENT_DOCKER_COMPOSE has been calculated."
|
|
echo " Called by:" >&2
|
|
printf " - %s\n" "${FUNCNAME[@]:1}" >&2
|
|
return 1
|
|
fi
|
|
cat "$_CURRENT_DOCKER_COMPOSE" > "$docker_compose_dir/docker-compose.yml" || return 1
|
|
if [ -e "$state_tmpdir/to-merge-in-docker-compose.yml" ]; then
|
|
# debug "Merging some config data in docker-compose.yml:"
|
|
# debug "$(cat $state_tmpdir/to-merge-in-docker-compose.yml)"
|
|
_config_merge "$docker_compose_dir/docker-compose.yml" "$state_tmpdir/to-merge-in-docker-compose.yml" || return 1
|
|
fi
|
|
|
|
if [ -z "$(echo $(cat "$docker_compose_dir/docker-compose.yml"))" ]; then
|
|
die "Generated 'docker-compose.yml' is unexpectedly empty."
|
|
fi
|
|
|
|
## XXXvlab: could be more specific and only link the needed charms
|
|
## XXXvlab: why do we need these links ? If this is for the build command, then it is not useful anymore.
|
|
# for charm in $(shyaml keys services < "$docker_compose_dir/docker-compose.yml"); do
|
|
# if charm.exists "$charm"; then
|
|
# ln -sf "$(charm.get_dir "$charm")" "$docker_compose_dir/$charm" || exit 1
|
|
# fi
|
|
# done
|
|
mkdir "$docker_compose_dir/.data"
|
|
|
|
if [ -z "$COMPOSE_DISABLE_DOCKER_COMPOSE_STORE" ]; then
|
|
sha=$(docker_compose_store "$docker_compose_dir/docker-compose.yml") || return 1
|
|
fi
|
|
|
|
{
|
|
{
|
|
{
|
|
if [ -z "$COMPOSE_DISABLE_DOCKER_COMPOSE_STORE" ]; then
|
|
cd "/var/lib/compose/docker-compose/$sha/$project" || return 1
|
|
else
|
|
cd "$docker_compose_dir" || return 1
|
|
fi
|
|
if [ -f ".env" ]; then
|
|
debug "${WHITE}.env$NORMAL for $DARKYELLOW$SERVICE_PACK$NORMAL:"
|
|
debug "$(cat ".env" | prefix " $GRAY|$NORMAL ")"
|
|
fi
|
|
debug "${WHITE}docker-compose.yml$NORMAL for $DARKYELLOW$SERVICE_PACK$NORMAL:"
|
|
debug "$(cat "docker-compose.yml" | prefix " $GRAY|$NORMAL ")"
|
|
debug "${WHITE}Launching$NORMAL: docker-compose $@"
|
|
if [ "$DRY_COMPOSE_RUN" ]; then
|
|
echo docker-compose "$@"
|
|
else
|
|
docker-compose "$@"
|
|
fi
|
|
echo "$?" > "$docker_compose_dir/.data/errlvl"
|
|
} | _save stdout
|
|
} 3>&1 1>&2 2>&3 | _save stderr
|
|
} 3>&1 1>&2 2>&3
|
|
if tail -n 1 "$docker_compose_dir/.data/stderr" | grep -E "Service .+ failed to build: Error getting container [0-9a-f]+ from driver devicemapper: (open|Error mounting) /dev/mapper/docker-.*: no such file or directory$" >/dev/null 2>&1; then
|
|
err "Detected bug https://github.com/docker/docker/issues/4036 ... "
|
|
err "Please re-launch your command, or switch from 'devicemapper' driver to 'overlayfs' or 'aufs'."
|
|
fi
|
|
|
|
docker_compose_errlvl="$(cat "$docker_compose_dir/.data/errlvl" 2>/dev/null)"
|
|
if [ -z "$docker_compose_errlvl" ]; then
|
|
err "Something went wrong before you could gather docker-compose errorlevel."
|
|
return 1
|
|
fi
|
|
return "$docker_compose_errlvl"
|
|
}
|
|
export -f launch_docker_compose
|
|
|
|
|
|
get_compose_yml_location() {
|
|
if ! [ -z ${COMPOSE_YML_FILE+x} ]; then ## if set, even if empty
|
|
echo "$COMPOSE_YML_FILE"
|
|
return 0
|
|
fi
|
|
parent=$(while ! [ -f "./compose.yml" ]; do
|
|
[ "$PWD" == "/" ] && exit 0
|
|
cd ..
|
|
done; echo "$PWD"
|
|
)
|
|
if [ "$parent" ]; then
|
|
echo "$parent/compose.yml"
|
|
return 0
|
|
fi
|
|
## XXXvlab: do we need this additional environment variable,
## or is COMPOSE_YML_FILE sufficient on its own?
|
|
if [ "$DEFAULT_COMPOSE_FILE" ]; then
|
|
if ! [ -e "$DEFAULT_COMPOSE_FILE" ]; then
|
|
warn "No 'compose.yml' was found in current or parent dirs," \
|
|
"and \$DEFAULT_COMPOSE_FILE points to an unexistent file." \
|
|
"(${DEFAULT_COMPOSE_FILE})"
|
|
return 0
|
|
fi
|
|
echo "$DEFAULT_COMPOSE_FILE"
|
|
return 0
|
|
fi
|
|
warn "No 'compose.yml' was found in current or parent dirs, and no \$DEFAULT_COMPOSE_FILE was set."
|
|
return 0
|
|
}
|
|
export -f get_compose_yml_location
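
## Lookup order implemented above: an explicitly set $COMPOSE_YML_FILE wins
## (even when empty), then the first 'compose.yml' found while walking up from
## the current directory, then $DEFAULT_COMPOSE_FILE when it exists; otherwise
## only a warning is emitted and nothing is printed.
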
get_compose_yml_content() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache"
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
if [ -z "$COMPOSE_YML_FILE" ]; then
|
|
COMPOSE_YML_FILE=$(get_compose_yml_location) || return 1
|
|
fi
|
|
if [ -e "$COMPOSE_YML_FILE" ]; then
|
|
# debug "Found $WHITE$exname$NORMAL YAML file in '$COMPOSE_YML_FILE'."
|
|
COMPOSE_YML_CONTENT=$(cat "$COMPOSE_YML_FILE") || {
|
|
err "Could not read '$COMPOSE_YML_FILE'."
|
|
return 1
|
|
}
|
|
else
|
|
debug "No compose file found. Using an empty one."
|
|
COMPOSE_YML_CONTENT=""
|
|
fi
|
|
|
|
COMPOSE_YML_CONTENT=$(merge_yaml_str "$COMPOSE_YML_CONTENT" "${compose_contents[@]}") || return 1
|
|
output=$(echo "$COMPOSE_YML_CONTENT"| shyaml get-value 2>&1)
|
|
if [ "$?" != 0 ]; then
|
|
outputed_something=
|
|
while IFS='' read -r line1 && IFS='' read -r line2; do
|
|
[ "$outputed_something" ] || err "Invalid YAML in '$COMPOSE_YML_FILE':"
|
|
outputed_something=true
|
|
echo "$line1 $GRAY($line2)$NORMAL"
|
|
done < <(echo "$output" | grep ^yaml.scanner -A 100 |
|
|
sed -r 's/^ in "<stdin>", //g' | sed -r 's/^yaml.scanner.[a-zA-Z]+: //g') |
|
|
prefix " $GRAY|$NORMAL "
|
|
[ "$outputed_something" ] || {
|
|
err "Unexpected error while running 'shyaml get-value' on '$COMPOSE_YML_FILE':"
|
|
echo "$output" | prefix " $GRAY|$NORMAL "
|
|
}
|
|
return 1
|
|
fi
|
|
echo "$COMPOSE_YML_CONTENT" | tee "$cache_file" || return 1
|
|
}
|
|
export -f get_compose_yml_content
|
|
|
|
compose:yml:hash() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache"
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
compose_yml_content=$(get_compose_yml_content) || return 1
|
|
compose_yml_hash=$(echo "$compose_yml_content" | hash_get) || return 1
|
|
|
|
e "$compose_yml_hash" | tee "$cache_file" || return 1
|
|
}
|
|
export -f compose:yml:hash
|
|
|
|
|
|
compose:yml:root:services() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache" services compose_yml_content
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
|
|
compose_yml_content=$(get_compose_yml_content) || return 1
|
|
services=($(e "$compose_yml_content" | shyaml keys)) || return 1
|
|
|
|
e "${services[*]}" | tee "$cache_file" || return 1
|
|
}
|
|
export -f compose:yml:root:services
|
|
|
|
|
|
get_default_target_services() {
|
|
local services=("$@")
|
|
if [ -z "${services[*]}" ]; then
|
|
if [ "$DEFAULT_SERVICES" ]; then
|
|
debug "No service provided, using $WHITE\$DEFAULT_SERVICES$NORMAL variable." \
|
|
"Target services: $DARKYELLOW$DEFAULT_SERVICES$NORMAL"
|
|
services="$DEFAULT_SERVICES"
|
|
else
|
|
err "No service provided."
|
|
return 1
|
|
fi
|
|
fi
|
|
echo "${services[*]}"
|
|
}
|
|
export -f get_default_target_services
|
|
|
|
|
|
get_master_services() {
|
|
local loaded master_service service
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$(H "$@" )"
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
declare -A loaded
|
|
for service in "$@"; do
|
|
master_service=$(get_top_master_service_for_service "$service") || return 1
|
|
if [ "${loaded[$master_service]}" ]; then
|
|
continue
|
|
fi
|
|
echo "$master_service"
|
|
loaded["$master_service"]=1
|
|
done > "$cache_file".wip || return 1
|
|
|
|
mv "$cache_file"{.wip,} || return 1
|
|
cat "$cache_file" || return 1
|
|
}
|
|
export -f get_master_services
|
|
|
|
|
|
get_current_docker_container_id() {
|
|
local line
|
|
line=$(cat "/proc/self/cpuset") || return 1
|
|
[[ "$line" == *docker* ]] || return 1
|
|
echo "${line##*/}"
|
|
}
|
|
export -f get_current_docker_container_id
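
## Note: this relies on '/proc/self/cpuset' containing something like
## '/docker/<container-id>' (typical of cgroup v1 layouts); outside of a
## docker container the function simply fails.
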
## If we are running inside a docker container, we might want to know
## the real host path of some local paths.
|
|
get_host_path() {
|
|
local path="$1"
|
|
path=$(realpath "$path") || return 1
|
|
container_id=$(get_current_docker_container_id) || {
|
|
print "%s" "$path"
|
|
return 0
|
|
}
|
|
biggest_dst=
|
|
current_src=
|
|
while read-0 src dst; do
|
|
[[ "$path" == "$dst"* ]] || continue
|
|
if [[ "${#biggest_dst}" < "${#dst}" ]]; then
|
|
biggest_dst="$dst"
|
|
current_src="$src"
|
|
fi
|
|
done < <(get_volumes_for_container "$container_id")
|
|
if [ "$current_src" ]; then
|
|
printf "%s" "$current_src"
|
|
else
|
|
return 1
|
|
fi
|
|
}
|
|
export -f get_host_path
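
## Sketch of intended use (hypothetical mount): if the running container has
## host '/srv/data' mounted at '/data', then `get_host_path /data/foo` prints
## '/srv/data' (the source of the longest matching destination). Outside of a
## container the given path is printed back unchanged.
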
_setup_state_dir() {
|
|
export state_tmpdir=$(mktemp -d -t tmp.XXXXXXXXXX)
|
|
#debug "Creating temporary state directory in '$state_tmpdir'."
|
|
# trap_add EXIT "debug \"Removing temporary state directory in $state_tmpdir.\";\
|
|
# rm -rf \"$state_tmpdir\""
|
|
trap_add EXIT "rm -rf \"$state_tmpdir\""
|
|
}
|
|
|
|
|
|
get_docker_compose_help_msg() {
|
|
local action="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$({ p0 "$1"; cat "$(which docker-compose)"; } | md5_compat)" \
|
|
docker_compose_help_msg
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
docker_compose_help_msg=$(docker-compose $action --help 2>/dev/null) || return 1
|
|
echo "$docker_compose_help_msg" |
|
|
tee "$cache_file" || return 1
|
|
}
|
|
|
|
|
|
get_docker_compose_usage() {
|
|
local action="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$({ p0 "$1"; cat "$(which docker-compose)"; } | md5_compat)" \
|
|
docker_compose_help_msg
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
docker_compose_help_msg=$(get_docker_compose_help_msg $action) || return 1
|
|
echo "$docker_compose_help_msg" |
|
|
grep -m 1 "^Usage:" -A 10000 |
|
|
egrep -m 1 "^\$" -B 10000 |
|
|
nspc |
|
|
sed -r 's/^Usage: //g' |
|
|
tee "$cache_file" || return 1
|
|
}
|
|
|
|
|
|
get_docker_compose_opts_help() {
|
|
local action="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$({ p0 "$1"; cat "$(which docker-compose)"; } | md5_compat)" \
|
|
docker_compose_help_msg
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
docker_compose_opts_help=$(get_docker_compose_help_msg $action) || return 1
|
|
echo "$docker_compose_opts_help" |
|
|
grep '^Options:' -A 20000 |
|
|
tail -n +2 |
|
|
{ cat ; echo; } |
|
|
egrep -m 1 "^\S*\$" -B 10000 |
|
|
head -n -1 |
|
|
tee "$cache_file" || return 1
|
|
}
|
|
|
|
|
|
get_docker_compose_commands_help() {
|
|
local action="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$({ p0 "$1"; cat "$(which docker-compose)"; } | md5_compat)" \
|
|
docker_compose_help_msg
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
docker_compose_opts_help=$(get_docker_compose_help_msg $action) || return 1
|
|
echo "$docker_compose_opts_help" |
|
|
grep '^Commands:' -A 20000 |
|
|
tail -n +2 |
|
|
{ cat ; echo; } |
|
|
egrep -m 1 "^\S*\$" -B 10000 |
|
|
head -n -1 |
|
|
tee "$cache_file" || return 1
|
|
}
|
|
|
|
|
|
get_docker_compose_opts_list() {
|
|
local action="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$({ p0 "$1"; cat "$(which docker-compose)"; } | md5_compat)" \
|
|
docker_compose_help_msg
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file" &&
|
|
touch "$cache_file" || return 1
|
|
return 0
|
|
fi
|
|
docker_compose_opts_help=$(get_docker_compose_opts_help $action) || return 1
|
|
echo "$docker_compose_opts_help" |
|
|
egrep "^\s+-" |
|
|
sed -r 's/\s+((((-[a-zA-Z]|--[a-zA-Z0-9-]+)( [A-Z=]+|=[^ ]+)?)(, )?)+)\s+.*$/\1/g' |
|
|
tee "$cache_file" || return 1
|
|
}
|
|
|
|
|
|
options_parser() {
|
|
sed -r 's/^(\s+(((-[a-zA-Z]|--[a-zA-Z0-9-]+)([ =]([a-zA-Z_=\"\[]|\])+)?(, | )?)+)\s+)[^ ].*$/\x0\2\x0\0/g'
|
|
printf "\0"
|
|
}
|
|
|
|
|
|
remove_options_in_option_help_msg() {
|
|
{
|
|
read-0 null
|
|
if [ "$null" ]; then
|
|
err "options parsing error, should start with an option line."
|
|
return 1
|
|
fi
|
|
while read-0 opt full_txt;do
|
|
multi_opts="$(printf "%s " $opt | multi_opts_filter)"
|
|
single_opts="$(printf "%s " $opt | single_opts_filter)"
|
|
for to_remove in "$@"; do
|
|
str_matches "$to_remove" $multi_opts $single_opts && {
|
|
continue 2
|
|
}
|
|
done
|
|
echo -n "$full_txt"
|
|
done
|
|
} < <(options_parser)
|
|
}
|
|
|
|
|
|
_MULTIOPTION_REGEX='^((-[a-zA-Z]|--[a-zA-Z0-9-]+)(, )?)+'
|
|
_MULTIOPTION_REGEX_LINE_FILTER=$_MULTIOPTION_REGEX'(\s|=)'
|
|
|
|
|
|
multi_opts_filter() {
|
|
egrep "$_MULTIOPTION_REGEX_LINE_FILTER" |
|
|
sed -r "s/^($_MULTIOPTION_REGEX)(\s|=).*$/\1/g" |
|
|
tr ',' "\n" | nspc
|
|
}
|
|
|
|
|
|
single_opts_filter() {
|
|
egrep -v "$_MULTIOPTION_REGEX_LINE_FILTER" |
|
|
tr ',' "\n" | nspc
|
|
}
|
|
|
|
|
|
get_docker_compose_multi_opts_list() {
|
|
local action="$1" opts_list
|
|
opts_list=$(get_docker_compose_opts_list "$action") || return 1
|
|
echo "$opts_list" | multi_opts_filter
|
|
}
|
|
|
|
|
|
get_docker_compose_single_opts_list() {
|
|
local action="$1" opts_list
|
|
opts_list=$(get_docker_compose_opts_list "$action") || return 1
|
|
echo "$opts_list" | single_opts_filter
|
|
}
|
|
|
|
display_commands_help() {
|
|
local charm_actions
|
|
echo
|
|
echo "${WHITE}Commands${NORMAL} (added by compose):"
|
|
echo " ${DARKCYAN}cache${NORMAL} Control compose's cache"
|
|
echo " ${DARKCYAN}status${NORMAL} Display statuses of services"
|
|
echo
|
|
echo "${WHITE}Commands${NORMAL} (thanks to docker-compose):"
|
|
get_docker_compose_commands_help | sed -r "s/ ([a-z]+)(\s+)/ ${DARKCYAN}\1${NORMAL}\2/g"
|
|
charm_actions_help=$(get_docker_charm_action_help) || return 1
|
|
if [ "$charm_actions_help" ]; then
|
|
echo
|
|
echo "${WHITE}Charm actions${NORMAL}:"
|
|
printf "%s\n" "$charm_actions_help" | \
|
|
sed -r "s/^ ([a-z0-9-]+)(\s+)([a-z0-9-]+)(\s+)/ ${DARKCYAN}\1${NORMAL}\2${DARKYELLOW}\3${NORMAL}\4/g"
|
|
fi
|
|
}
|
|
|
|
get_docker_charm_action() {
|
|
local services service charm relation_name target_service relation_config \
|
|
target_charm services
|
|
## XXXvlab: this is for get_service_relations
|
|
NO_CONSTRAINT_CHECK=True service:all:set_relations_hash || {
|
|
err-d "Failed to set relations hash."
|
|
return 1
|
|
}
|
|
services=($(get_all_services)) || return 1
|
|
for service in "${services[@]}"; do
|
|
printf "%s:\n" "$service"
|
|
charm=$(get_service_charm "$service") || return 1
|
|
for action in $(charm.ls_direct_actions "$charm"); do
|
|
printf " %s:\n" "$action"
|
|
printf " type: %s\n" "direct"
|
|
done
|
|
while read-0 relation_name target_service _relation_config _tech_dep; do
|
|
target_charm=$(get_service_charm "$target_service") || return 1
|
|
for action in $(charm.ls_relation_actions "$target_charm" "$relation_name"); do
|
|
printf " %s:\n" "$action"
|
|
printf " type: %s\n" "indirect"
|
|
printf " inherited: %s\n" "$target_charm"
|
|
done
|
|
done < <(get_service_relations "$service")
|
|
done
|
|
}
|
|
export -f get_docker_charm_action
|
|
|
|
get_docker_charm_action_help() {
|
|
local services service charm relation_name target_service relation_config \
|
|
target_charm
|
|
## XXXvlab: this is for get_service_relations
|
|
NO_CONSTRAINT_CHECK=True service:all:set_relations_hash || {
|
|
err-d "Failed to set relations hash."
|
|
return 1
|
|
}
|
|
services=($(get_all_services)) || return 1
|
|
for service in "${services[@]}"; do
|
|
out=$(
|
|
charm=$(get_service_charm "$service") || return 1
|
|
for action in $(charm.ls_direct_actions "$charm"); do
|
|
printf " %-28s %s\n" "$action $service" "Direct action from ${DARKPINK}$charm${NORMAL}"
|
|
done
|
|
while read-0 relation_name target_service _relation_config _tech_dep; do
|
|
target_charm=$(get_service_charm "$target_service") || return 1
|
|
for action in $(charm.ls_relation_actions "$target_charm" "$relation_name"); do
|
|
printf " %-28s %s\n" "$action $service" "Indirect action from ${DARKPINK}$target_charm${NORMAL}"
|
|
done
|
|
done < <(get_service_relations "$service")
|
|
)
|
|
if [ "$out" ]; then
|
|
echo " for ${DARKYELLOW}$service${NORMAL}:"
|
|
printf "%s\n" "$out"
|
|
fi
|
|
done
|
|
}
|
|
|
|
display_help() {
|
|
print_help
|
|
echo "${WHITE}Usage${NORMAL}:"
|
|
echo " $usage"
|
|
echo " $usage cache {clean|clear}"
|
|
echo "${WHITE}Options${NORMAL}:"
|
|
echo " -h, --help Print this message and quit"
|
|
echo " (ignoring any other options)"
|
|
echo " -V, --version Print current version and quit"
|
|
echo " (ignoring any other options)"
|
|
echo " --dirs Display data dirs and quit"
|
|
echo " (ignoring any other options)"
|
|
echo " --get-project-name Display project name and quit"
|
|
echo " (ignoring any other options)"
|
|
echo " --get-available-actions Display all available actions and quit"
|
|
echo " (ignoring any other options)"
|
|
echo " -v, --verbose Be more verbose"
|
|
echo " -q, --quiet Be quiet"
|
|
echo " -d, --debug Print full debugging information (sets also verbose)"
|
|
echo " --dry-compose-run If docker-compose will be run, only print out what"
|
|
echo " command line will be used."
|
|
echo " --no-relations Do not run any relation script"
|
|
echo " --no-hooks Do not run any hook script"
|
|
echo " --no-init Do not run any init script"
|
|
echo " --no-post-deploy Do not run any post-deploy script"
|
|
echo " --no-pre-deploy Do not run any pre-deploy script"
|
|
echo " --without-relation RELATION "
|
|
echo " Do not run given relation"
|
|
echo " -R, --rebuild-relations-to-service SERVICE"
|
|
echo " Will rebuild all relations to given service"
|
|
echo " --add-compose-content, -Y YAML"
|
|
echo " Will merge some direct YAML with the current compose"
|
|
echo " -c, --color Force color mode (default is to detect if in tty mode)"
|
|
echo " --push-builds Will push cached docker images to docker cache registry"
|
|
|
|
get_docker_compose_opts_help | remove_options_in_option_help_msg --version --help --verbose |
|
|
filter_docker_compose_help_message
|
|
display_commands_help
|
|
}
|
|
|
|
|
|
_graph_service() {
|
|
local service="$1" base="$1"
|
|
|
|
charm=$(get_service_charm "$service") || return 1
|
|
metadata=$(charm.metadata "$charm") || return 1
|
|
subordinate=$(echo "$metadata" | shyaml get-value "subordinate" 2>/dev/null)
|
|
|
|
if [[ "$subordinate" =~ ^True|true$ ]]; then
|
|
requires="$(echo "$metadata" | shyaml get-value "requires" 2>/dev/null)"
|
|
master_charm=
|
|
while read-0 relation_name relation; do
|
|
[ "$(echo "$relation" | shyaml get-value "scope" 2>/dev/null)" == "container" ] || continue
|
|
interface="$(echo "$relation" | shyaml get-value "interface" 2>/dev/null)"
|
|
if [ -z "$interface" ]; then
|
|
err "No ${WHITE}$interface${NORMAL} set for relation $relation_name."
|
|
return 1
|
|
fi
|
|
|
|
## Action provided by relation ?
|
|
|
|
target_service=
|
|
while read-0 relation_name candidate_target_service _relation_config _tech_dep; do
|
|
[ "$interface" == "$relation_name" ] && {
|
|
target_service="$candidate_target_service"
|
|
break
|
|
}
|
|
done < <(get_service_relations "$service")
|
|
if [ -z "$target_service" ]; then
|
|
err "Couldn't find ${WHITE}relations.$interface${NORMAL} in" \
|
|
"${DARKYELLOW}$service$NORMAL compose definition."
|
|
return 1
|
|
fi
|
|
master_service="$target_service"
|
|
master_charm=$(get_service_charm "$target_service") || return 1
|
|
break
|
|
done < <(echo "$requires" | shyaml key-values-0 2>/dev/null)
|
|
fi
|
|
|
|
_graph_node_service "$service" "$base" "$charm"
|
|
_graph_edge_service "$service" "$subordinate" "$master_service"
|
|
|
|
}
|
|
|
|
|
|
_graph_node_service() {
|
|
local service="$1" base="$2" charm="$3"
|
|
|
|
cat <<EOF
|
|
"$(_graph_node_service_label ${service})" [
|
|
style = "filled, $([[ "$subordinate" =~ ^True|true$ ]] && echo "dashed" || echo "bold")"
|
|
penwidth = $([[ "$subordinate" =~ ^True|true$ ]] && echo "3" || echo "5")
|
|
color = $([ "$base" ] && echo "blue" || echo "black")
|
|
fillcolor = "white"
|
|
fontname = "Courier New"
|
|
shape = "Mrecord"
|
|
label =<$(_graph_node_service_content "$service")>
|
|
];
|
|
EOF
|
|
|
|
}
|
|
|
|
|
|
_graph_edge_service() {
|
|
local service="$1" subordinate="$2" master_service="$3"
|
|
while read-0 relation_name target_service relation_config tech_dep; do
|
|
cat <<EOF
|
|
"$(_graph_node_service_label ${service})" -> "$(_graph_node_service_label ${target_service})" [
|
|
penwidth = $([ "$master_service" == "$target_service" ] && echo 3 || echo 2)
|
|
|
|
fontsize = 16
|
|
fontcolor = "black"
|
|
style = $([ "$master_service" == "$target_service" ] && echo dashed || echo "\"\"")
|
|
weight = $([ "$master_service" == "$target_service" ] && echo 2.0 || echo 1.0)
|
|
dir = $([ "$master_service" == "$target_service" ] && echo none || echo both)
|
|
arrowtail = odot
|
|
# arrowhead = dotlicurve
|
|
taillabel = "$relation_name" ];
|
|
EOF
|
|
done < <(get_service_relations "$service") || return 1
|
|
}
|
|
|
|
|
|
_graph_node_service_label() {
|
|
local service="$1"
|
|
echo "service_$service"
|
|
}
|
|
|
|
|
|
_graph_node_service_content() {
|
|
local service="$1"
|
|
charm=$(get_service_charm "$service") || return 1
|
|
|
|
cat <<EOF
|
|
<table border="0" cellborder="0" cellpadding="3" bgcolor="white">
|
|
<tr>
|
|
<td bgcolor="black" align="center" colspan="2">
|
|
<font color="white">$service</font>
|
|
</td>
|
|
</tr>
|
|
$(if [ "$charm" != "$service" ]; then
|
|
cat <<EOF2
|
|
<tr>
|
|
<td align="left" port="r0">charm: $charm</td>
|
|
</tr>
|
|
EOF2
|
|
fi)
|
|
</table>
|
|
EOF
|
|
}
|
|
|
|
|
|
cla_contains () {
|
|
local e
|
|
for e in "${@:2}"; do [[ "$e" == "$1" ]] && return 0; done
|
|
return 1
|
|
}
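
## Usage example: `cla_contains b a b c` returns 0 (found),
## while `cla_contains z a b c` returns 1.
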
filter_docker_compose_help_message() {
|
|
cat - |
|
|
sed -r "s/docker-compose run/${DARKWHITE}compose${NORMAL} ${DARKCYAN}$action${NORMAL}/g;
|
|
s/docker-compose.yml/compose.yml/g;
|
|
s/SERVICES?/${DARKYELLOW}\0${NORMAL}/g;
|
|
s/^(\s+)\\$/\1${WHITE}\$${NORMAL}/g;
|
|
s/^(\s+)run/\1${DARKCYAN}$action${NORMAL}/g;
|
|
s/docker-compose/${DARKWHITE}compose${NORMAL}/g"
|
|
}
|
|
|
|
|
|
graph() {
|
|
local services=("$@")
|
|
declare -A entries
|
|
cat <<EOF
|
|
digraph g {
|
|
graph [
|
|
fontsize=30
|
|
labelloc="t"
|
|
label=""
|
|
splines=true
|
|
overlap=false
|
|
#rankdir = "LR"
|
|
];
|
|
ratio = auto;
|
|
EOF
|
|
for target_service in "$@"; do
|
|
services=$(get_ordered_service_dependencies "$target_service") || return 1
|
|
for service in $services; do
|
|
[ "${entries[$service]}" ] && continue || entries[$service]=1
|
|
if cla_contains "$service" "${services[@]}"; then
|
|
base=true
|
|
else
|
|
base=
|
|
fi
|
|
_graph_service "$service" "$base"
|
|
done
|
|
done
|
|
echo "}"
|
|
}
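
## The function above emits a Graphviz DOT document on stdout; assuming
## graphviz is installed, it can be rendered with, for instance:
##   ... | dot -Tsvg > services.svg
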
cached_wget() {
|
|
local cache_file="$CACHEDIR/$FUNCNAME.cache.$(p0 "$@" | md5_compat)" \
|
|
url="$1"
|
|
if [ -e "$cache_file" ]; then
|
|
cat "$cache_file"
|
|
touch "$cache_file"
|
|
return 0
|
|
fi
|
|
wget -O- "${url}" |
|
|
tee "$cache_file"
|
|
|
|
if [ "${PIPESTATUS[0]}" != 0 ]; then
|
|
rm "$cache_file"
|
|
die "Unable to fetch '$url'."
|
|
return 1
|
|
fi
|
|
}
|
|
export -f cached_wget
|
|
|
|
|
|
[ "$SOURCED" ] && return 0
|
|
|
|
|
|
trap_add "EXIT" clean_cache
|
|
|
|
export COMPOSE_DOCKER_REGISTRY="${COMPOSE_DOCKER_REGISTRY:-docker.0k.io}"
|
|
|
|
if [ -z "$DISABLE_SYSTEM_CONFIG_FILE" ]; then
|
|
if [ -r /etc/default/charm ]; then
|
|
. "/etc/default/charm"
|
|
fi
|
|
|
|
if [ -r "/etc/default/$exname" ]; then
|
|
. "/etc/default/$exname"
|
|
fi
|
|
|
|
## XXXvlab: should provide YML config opportunities in possible parent dirs ?
|
|
## userdir ? and global /etc/compose.yml ?
|
|
for cfgfile in /etc/compose.conf /etc/compose.local.conf \
|
|
/etc/default/compose /etc/compose/local.conf; do
|
|
[ -e "$cfgfile" ] || continue
|
|
. "$cfgfile" || die "Loading config file '$cfgfile' failed."
|
|
done
|
|
fi
|
|
|
|
_setup_state_dir
|
|
mkdir -p "$CACHEDIR" || exit 1
|
|
|
|
|
|
log () { cat; }
|
|
export -f log
|
|
|
|
##
|
|
## Argument parsing
|
|
##
|
|
|
|
wrap_opts=()
|
|
services=()
|
|
remainder_args=()
|
|
compose_opts=()
|
|
compose_contents=()
|
|
action_opts=()
|
|
services_args=()
|
|
pos_arg_ct=0
|
|
no_hooks=
|
|
no_init=
|
|
action=
|
|
stage="main" ## switches from 'main', to 'action', 'remainder'
|
|
is_docker_compose_action=
|
|
is_docker_compose_action_multi_service=
|
|
rebuild_relations_to_service=()
|
|
color=
|
|
declare -A without_relations
|
|
DC_MATCH_MULTI=$(get_docker_compose_multi_opts_list) &&
|
|
DC_MATCH_SINGLE=$(get_docker_compose_single_opts_list) || exit 1
|
|
while read-0 arg; do
|
|
case "$stage" in
|
|
"main")
|
|
case "$arg" in
|
|
--help|-h)
|
|
no_init=true ; no_hooks=true ; no_relations=true
|
|
display_help
|
|
exit 0
|
|
;;
|
|
--verbose|-v)
|
|
export VERBOSE=true
|
|
compose_opts+=("--verbose")
|
|
;;
|
|
--quiet|-q)
|
|
export QUIET=true
|
|
wrap_opts+=("-q")
|
|
log () { cat >&2; }
|
|
export -f log
|
|
;;
|
|
--version|-V)
|
|
print_version
|
|
docker-compose --version
|
|
docker --version
|
|
exit 0
|
|
;;
|
|
-f|--file)
|
|
read-0 value
|
|
[ -e "$value" ] || die "File $value doesn't exists"
|
|
export COMPOSE_YML_FILE="$value"
|
|
shift
|
|
;;
|
|
-p|--project-name)
|
|
read-0 value
|
|
export DEFAULT_PROJECT_NAME="$value"
|
|
compose_opts+=("--project-name $value")
|
|
shift
|
|
;;
|
|
--color|-c)
|
|
if [ "$color" == "0" ]; then
|
|
err "Conflicting option --color with previous --no-ansi."
|
|
exit 1
|
|
fi
|
|
color=1
|
|
ansi_color yes
|
|
;;
|
|
--no-ansi)
|
|
if [ "$color" == "1" ]; then
|
|
err "Conflicting option --no-ansi with previous --color."
|
|
exit 1
|
|
fi
|
|
color=0
|
|
ansi_color no
|
|
compose_opts+=("--no-ansi")
|
|
;;
|
|
--no-relations)
|
|
export no_relations=true
|
|
;;
|
|
--without-relation)
|
|
read-0 value
|
|
without_relations["$value"]=1
|
|
shift
|
|
;;
|
|
--no-hooks)
|
|
export no_hooks=true
|
|
;;
|
|
--no-init)
|
|
export no_init=true
|
|
;;
|
|
--no-post-deploy)
|
|
export no_post_deploy=true
|
|
;;
|
|
--no-pre-deploy)
|
|
export no_pre_deploy=true
|
|
;;
|
|
--rebuild-relations-to-service|-R)
|
|
read-0 value
|
|
rebuild_relations_to_service+=("$value")
|
|
shift
|
|
;;
|
|
--push-builds)
|
|
export COMPOSE_PUSH_TO_REGISTRY=1
|
|
;;
|
|
--debug|-d)
|
|
export DEBUG=true
|
|
export VERBOSE=true
|
|
#compose_opts+=("--verbose" "--log-level" "DEBUG")
|
|
;;
|
|
--add-compose-content|-Y)
|
|
read-0 value
|
|
compose_contents+=("$value")
|
|
shift
|
|
;;
|
|
--dirs)
|
|
echo "CACHEDIR: $CACHEDIR"
|
|
echo "VARDIR: $VARDIR"
|
|
exit 0
|
|
;;
|
|
--get-project-name)
|
|
project=$(get_default_project_name) || exit 1
|
|
echo "$project"
|
|
exit 0
|
|
;;
|
|
--get-available-actions)
|
|
get_docker_charm_action
|
|
exit $?
|
|
;;
|
|
--dry-compose-run)
|
|
export DRY_COMPOSE_RUN=true
|
|
;;
|
|
--*|-*)
|
|
if str_pattern_matches "$arg" $DC_MATCH_MULTI; then
|
|
read-0 value
|
|
compose_opts+=("$arg" "$value")
|
|
shift;
|
|
elif str_pattern_matches "$arg" $DC_MATCH_SINGLE; then
|
|
compose_opts+=("$arg")
|
|
else
|
|
err "Unknown option '$arg'. Please check help:"
|
|
display_help >&2
|
|
exit 1
|
|
fi
|
|
;;
|
|
*)
|
|
action="$arg"
|
|
stage="action"
|
|
if DC_USAGE=$(get_docker_compose_usage "$action"); then
|
|
is_docker_compose_action=true
|
|
DC_MATCH_MULTI=$(get_docker_compose_multi_opts_list "$action") &&
|
|
DC_MATCH_SINGLE="$(get_docker_compose_single_opts_list "$action")"
|
|
if [ "$DC_MATCH_MULTI" ]; then
|
|
DC_MATCH_SINGLE="$DC_MATCH_SINGLE $(echo "$DC_MATCH_MULTI" | sed -r 's/( |$)/=\* /g')"
|
|
fi
|
|
pos_args=($(echo "$DC_USAGE" | sed -r 's/\[-[^]]+\] ?//g;s/\[options\] ?//g'))
|
|
pos_args=("${pos_args[@]:1}")
|
|
if [[ "${pos_args[0]}" == "[SERVICE...]" ]]; then
|
|
is_docker_compose_action_multi_service=1
|
|
elif [[ "${pos_args[0]}" == "SERVICE" ]]; then
|
|
is_docker_compose_action_multi_service=0
|
|
fi
|
|
|
|
# echo "USAGE: $DC_USAGE"
|
|
# echo "pos_args: ${pos_args[@]}"
|
|
# echo "MULTI: $DC_MATCH_MULTI"
|
|
# echo "SINGLE: $DC_MATCH_SINGLE"
|
|
# exit 1
|
|
else
|
|
stage="remainder"
|
|
fi
|
|
;;
|
|
esac
|
|
;;
|
|
"action") ## Only for docker-compose actions
|
|
case "$arg" in
|
|
--help|-h)
|
|
no_init=true ; no_hooks=true ; no_relations=true
|
|
action_opts+=("$arg")
|
|
;;
|
|
--*|-*)
|
|
if [ "$is_docker_compose_action" ]; then
|
|
if str_pattern_matches "$arg" $DC_MATCH_MULTI; then
|
|
read-0 value
|
|
action_opts+=("$arg" "$value")
|
|
shift
|
|
elif str_pattern_matches "$arg" $DC_MATCH_SINGLE; then
|
|
action_opts+=("$arg")
|
|
else
|
|
err "Unknown option '$arg'. Please check '${DARKCYAN}$action${NORMAL}' help:"
|
|
docker-compose "$action" --help |
|
|
filter_docker_compose_help_message >&2
|
|
exit 1
|
|
fi
|
|
fi
|
|
;;
|
|
*)
|
|
# echo "LOOP $1 : pos_arg: $pos_arg_ct // ${pos_args[$pos_arg_ct]}"
|
|
if [[ "${pos_args[$pos_arg_ct]}" == "[SERVICE...]" ]]; then
|
|
services_args+=("$arg")
|
|
elif [[ "${pos_args[$pos_arg_ct]}" == "SERVICE" ]]; then
|
|
services_args=("$arg") || exit 1
|
|
stage="remainder"
|
|
else
|
|
action_posargs+=("$arg")
|
|
((pos_arg_ct++))
|
|
fi
|
|
;;
|
|
esac
|
|
;;
|
|
"remainder")
|
|
remainder_args+=("$arg")
|
|
while read-0 arg; do
|
|
remainder_args+=("$arg")
|
|
done
|
|
break 3
|
|
;;
|
|
esac
|
|
shift
|
|
done < <(cla.normalize "$@")
|
|
|
|
## These actions are additions to docker-compose actions and charm
|
|
## actions
|
|
more_actions=(status)
|
|
|
|
if [[ "$action" == *" "* ]]; then
|
|
err "Invalid action name containing spaces: ${DARKCYAN}$action${NORMAL}"
|
|
exit 1
|
|
fi
|
|
|
|
is_more_action=
|
|
[[ " ${more_actions[*]} " == *" $action "* ]] && is_more_action=true
|
|
|
|
[ -n "$CACHEDIR" ] || die "No cache directory defined."
|
|
[ -d "$CACHEDIR" ] || die "Cache directory '$CACHEDIR' doesn't exists."
|
|
|
|
case "$action" in
|
|
cache)
|
|
case "${remainder_args[0]}" in
|
|
clean)
|
|
clean_cache
|
|
exit 0
|
|
;;
|
|
clear)
|
|
Wrap "${wrap_opts[@]}" -v -d "clear cache directory" -- rm -rf "$CACHEDIR/"*
|
|
|
|
## clear all docker caches
|
|
## image name are like '[$COMPOSE_DOCKER_REGISTRY]cache/charm/CHARM_NAME:HASH'
|
|
Wrap "${wrap_opts[@]}" -v -d "clear docker cache" <<EOF
|
|
docker images --format "{{.Repository}}:{{.Tag}}" |
|
|
egrep "^($COMPOSE_DOCKER_REGISTRY/)?cache/charm/[a-zA-Z0-9._-]+:[0-9a-f]{32,32}$" |
|
|
while read -r image; do
|
|
docker rmi "\$image" || true
|
|
done
|
|
EOF
|
|
exit 0
|
|
;;
|
|
*)
|
|
err "Unknown cache command: ${DARKCYAN}${remainder_args[0]}${NORMAL}"
|
|
exit 1
|
|
;;
|
|
esac
|
|
;;
|
|
status)
|
|
state_inner_cols=(name charm type state root)
|
|
state_all_services=
|
|
state_services=()
|
|
state_columns=()
|
|
state_columns_default=(name charm type state version)
|
|
state_filters=()
|
|
state_columns_default_msg=""
|
|
for col in "${state_columns_default[@]}"; do
|
|
if [ -n "$state_columns_default_msg" ]; then
|
|
state_columns_default_msg+=","
|
|
fi
|
|
state_columns_default_msg+="$col"
|
|
done
|
|
help="\
|
|
Display status information on services.
|
|
|
|
If no services are provided, all services in the root compose file
|
|
will be displayed. Use the --all option to display status of all
|
|
services (including dependencies).
|
|
|
|
$exname offers a few possible columns that can be completed at the charm
level by implementing an \`actions/get-COLNAME\` script.
|
|
|
|
These are the compose's columns: ${state_inner_cols[@]}.
|
|
|
|
Usage: status [options] [SERVICE...]
|
|
|
|
Options:
|
|
-h, --help Print this message and quit
|
|
-a, --all Display status of all services
|
|
(removes all filters, and will add a
|
|
'root' first column by default)
|
|
-c, --column Column to display, can provide several
|
|
separated by commas, or option can be repeated.
|
|
(default: ${state_columns_default_msg})
|
|
-f, --filter Filter services by a key=value pair,
|
|
separated by commas or can be repeated.
|
|
(default: --filter root=yes)
|
|
"
|
|
while read-0 arg; do
|
|
case "$arg" in
|
|
--help|-h)
|
|
echo "$help"
|
|
exit 0
|
|
;;
|
|
--all|-a)
|
|
if [ "${#state_services[@]}" -gt 0 ]; then
|
|
err "Cannot use --all and provide services at the same time."
|
|
exit 1
|
|
fi
|
|
if [[ "${#state_filters[@]}" -gt 0 ]]; then
|
|
err "Cannot use --all and provide filters at the same time."
|
|
exit 1
|
|
fi
|
|
state_all_services=1
|
|
;;
|
|
--column|-c)
|
|
read-0 value
|
|
if [[ "$value" == *,* ]]; then
|
|
state_columns+=(${value//,/ })
|
|
else
|
|
state_columns+=("$value")
|
|
fi
|
|
;;
|
|
--filter|-f)
|
|
if [ "${#state_services[@]}" -gt 0 ]; then
|
|
err "Cannot use --filter and provide services at the same time."
|
|
exit 1
|
|
fi
|
|
if [ -n "$state_all_services" ]; then
|
|
err "Cannot use --all and provide filters at the same time."
|
|
exit 1
|
|
fi
|
|
read-0 value
|
|
if [[ "$value" == *,* ]]; then
|
|
state_filters+=(${value//,/ })
|
|
else
|
|
state_filters+=("$value")
|
|
fi
|
|
;;
|
|
--*|-*)
|
|
err "Unknown option '$arg'. Please check help:"
|
|
echo "$help" >&2
|
|
;;
|
|
*)
|
|
if [ -n "$state_all_services" ]; then
|
|
err "Cannot use --all and provide services at the same time."
|
|
exit 1
|
|
fi
|
|
if [[ "${#state_filters[@]}" -gt 0 ]]; then
|
|
err "Cannot use --filter and provide filters at the same time."
|
|
exit 1
|
|
fi
|
|
state_services+=("$arg")
|
|
;;
|
|
esac
|
|
done < <(cla.normalize "${remainder_args[@]}")
|
|
|
|
if [ "${#state_columns[@]}" == 0 ]; then
|
|
state_columns=("${state_columns_default[@]}")
|
|
fi
|
|
;;
|
|
esac


export compose_contents
[ "${services_args[*]}" ] && debug " ${DARKWHITE}Services:$NORMAL ${DARKYELLOW}${services_args[*]}$NORMAL"
[ "${compose_opts[*]}" ] && debug " ${DARKWHITE}Main docker-compose opts:$NORMAL ${compose_opts[*]}"
[ "${action_posargs[*]}" ] && debug " ${DARKWHITE}Main docker-compose pos args:$NORMAL ${action_posargs[*]}"
[ "${action_opts[*]}" ] && debug " ${DARKWHITE}Action $DARKCYAN$action$NORMAL with opts:$NORMAL ${action_opts[*]}"
[ "${remainder_args[*]}" ] && debug " ${DARKWHITE}Remainder args:$NORMAL ${remainder_args[*]}"


aexport remainder_args


##
## Actual code
##

COMPOSE_YML_FILE=$(get_compose_yml_location) || exit 1
COMPOSE_YML_CONTENT=$(get_compose_yml_content) || exit 1
COMPOSE_YML_CONTENT_HASH=$(compose:yml:hash) || exit 1
CHARM_STORE_HASH=$(charm.store_metadata_hash) || exit 1
COMBINED_HASH=$(H "$COMPOSE_YML_CONTENT_HASH" "$CHARM_STORE_HASH") || exit 1
export COMPOSE_YML_FILE COMPOSE_YML_CONTENT COMPOSE_YML_CONTENT_HASH CHARM_STORE_HASH COMBINED_HASH
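## Note: COMBINED_HASH mixes the hash of the compose file content with the
## charm-store metadata hash (see H above); all of these are exported so the
## helpers called below can rely on them (e.g. for caching).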
charm.sanity_checks || die "Sanity checks about charm-store failed. Please correct."

##
## Get services in command line.
##


if [ -z "$is_docker_compose_action" ] && [ -z "$is_more_action" ] && [ -n "$action" ]; then
    action_service=${remainder_args[0]}
    if [ -z "$action_service" ]; then
        err "No such command or action: ${DARKCYAN}$action${NORMAL}"
        display_commands_help
        exit 1
    fi

    services_args=($(compose:yml:root:services)) || exit 1
    ## Required by has_service_action
    service:all:set_relations_hash

    remainder_args=("${remainder_args[@]:1}")
    if has_service_action "$action_service" "$action" >/dev/null; then
        is_service_action=true
        services_args=("$action_service")
        {
            read-0 action_type
            case "$action_type" in
                "relation")
                    read-0 _ target_service _target_charm relation_name _ action_script_path
                    debug "Found action $DARKYELLOW${action_service}$NORMAL/$DARKBLUE$relation_name$NORMAL/$DARKCYAN$action$NORMAL (in $DARKYELLOW$target_service$NORMAL)"
                    services_args+=("$target_service")
                    ;;
                "direct")
                    read-0 _ action_script_path
                    debug "Found action $DARKYELLOW${action_service}$NORMAL.$DARKCYAN$action$NORMAL"
                    ;;
            esac
        } < <(has_service_action "$action_service" "$action")
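        ## For reference (inferred from the read-0 calls above), the
        ## has_service_action output is a NUL-separated record starting with
        ## the action type, then either:
        ##   "relation": _ target_service _target_charm relation_name _ action_script_path
        ##   "direct":   _ action_script_path
        ## (fields bound to '_' are ignored here).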
        get_all_relations "${services_args[@]}" >/dev/null || {
            echo " Hint: if this is unexpected, you can try to delete caches, and re-run the command." >&2
            exit 1
        }

        ## Divert logging from stdout to stderr
        log () { cat >&2; }
        export -f log
    else
        die "Unknown action '${DARKCYAN}$action$NORMAL': It doesn't match any docker-compose commands nor inner charm actions."
    fi
else
    case "$action" in
        ps|up)
            if [ "${#services_args[@]}" == 0 ]; then
                services_args=($(compose:yml:root:services)) || exit 1
            fi
            ;;
        status)
            services_args=("${state_services[@]}")
            if [ "${#services_args[@]}" == 0 ] && [ -z "$state_all_services" ]; then
                services_args=($(compose:yml:root:services)) || exit 1
            fi
            ;;
        config)
            services_args=("${action_posargs[@]}")
            ;;
    esac
fi

export COMPOSE_ACTION="$action"
NO_CONSTRAINT_CHECK=True
case "$action" in
    up|status|run)
        NO_CONSTRAINT_CHECK=
        if [ -n "$DEBUG" ]; then
            Elt "solve all relations"
            start=$(time_now)
        fi
        service:all:set_relations_hash || exit 1

        if [ -n "$DEBUG" ]; then
            elapsed="$(time_elapsed $start "$(time_now)")" || exit 1
            print_info "$(printf "%.3fs" "$elapsed")"
            Feedback
        fi

        all_services=($(get_all_services)) || exit 1
        ## check that services_args is a subset of all_services
        for service in "${services_args[@]}"; do
            [[ " ${all_services[*]} " == *" $service "* ]] || {
                err "Service ${DARKYELLOW}$service${NORMAL} is not defined in the current compose file."
                echo " Neither is it a dependency of a service in the compose file." >&2
                echo " These are the services directly or indirectly available from the current compose file:" >&2
                for service in "${all_services[@]}"; do
                    echo "   - ${DARKYELLOW}$service${NORMAL}" >&2
                done
                exit 1
            }
        done
        ;;
esac
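
## Deployment intents are tracked as marker files (see the 'up' handling
## below and the post-action bookkeeping at the end of the script): each
## service of the current project gets a directory under
## "$SERVICE_STATE_PATH/$PROJECT_NAME" holding one of 'deploying', 'up' or
## 'orphaning'; the '*ing' markers are intents that get resolved once the
## action has actually run.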

case "$action" in
    up)
        PROJECT_NAME=$(get_default_project_name) || exit 1

        ## Remove all intents (*ing states)
        rm -f "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/*ing || true

        ## Notify that we have the intent to bring up all these services.
        ## This will be used in inner or concurrent 'run' to include the
        ## services that are supposed to be up.
        mkdir -p "$SERVICE_STATE_PATH/$PROJECT_NAME" || exit 1
        services_args_deps=($(get_ordered_service_dependencies "${services_args[@]}")) || exit 1
        for service in "${services_args_deps[@]}"; do
            mkdir -p "$SERVICE_STATE_PATH/$PROJECT_NAME"/"$service" || exit 1
            [ -e "$SERVICE_STATE_PATH/$PROJECT_NAME"/"$service"/up ] || {
                touch "$SERVICE_STATE_PATH/$PROJECT_NAME"/"$service"/deploying || exit 1
            }
        done
        ## remove services not included in compose.yml anymore
        all_services_deps=($(get_ordered_service_dependencies "${all_services[@]}")) || exit 1
        for service in "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/up; do
            [ -e "$service" ] || continue
            state=${service##*/}
            service=${service%/$state}
            service=${service##*/}

            if [[ " ${all_services_deps[*]} " != *" ${service} "* ]]; then
                touch "$SERVICE_STATE_PATH/$PROJECT_NAME"/"${service}"/orphaning || exit 1
            fi
        done
        ;;

    run)
        PROJECT_NAME=$(get_default_project_name) || exit 1
        if [ -d "$SERVICE_STATE_PATH/$PROJECT_NAME" ]; then
            ## Notify that we have the intent to bring up all these services.
            ## This will be used in inner or concurrent 'run' to include the
            ## services that are supposed to be up.
            for service in "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/{up,deploying}; do
                [ -e "$service" ] || continue
                state=${service##*/}
                service=${service%/$state}
                service=${service##*/}
                ## don't add if orphaning
                [ -e "$SERVICE_STATE_PATH/$PROJECT_NAME"/"${service}"/orphaning ] && continue

            done
        fi
        ;;
    status)
        if [ -n "${state_all_services}" ] || [[ "${#state_filters[@]}" -gt 0 ]]; then
            services_args=("${all_services[@]}")
        fi
        ;;
esac

if [ -n "$DEBUG" ]; then
    Elt "get relation subset"
    start=$(time_now)
fi

get_subset_relations "${services_args[@]}" >/dev/null || exit 1

if [ -n "$DEBUG" ]; then
    elapsed="$(time_elapsed $start "$(time_now)")" || exit 1
    print_info "$(printf "%.3fs" "$elapsed")"
    Feedback
fi

if [ -n "$is_docker_compose_action" ] && [ "${#services_args[@]}" -gt 0 ]; then
    services=($(get_master_services "${services_args[@]}")) || exit 1
    if [ "$action" == "up" ]; then
        action_posargs+=($(services:get:upable "${services_args[@]}")) || exit 1
    elif [ "$is_docker_compose_action_multi_service" == "1" ]; then
        action_posargs+=("${services[@]}")
    elif [ "$is_docker_compose_action_multi_service" == "0" ]; then
        action_posargs+=("${services[0]}")  ## only the first service is the legit one
    fi
    ## Get rid of subordinates
    action_posargs=($(get_master_services "${action_posargs[@]}")) || exit 1
fi


get_docker_compose "${services_args[@]}" >/dev/null || {  ## precalculate variable \$_current_docker_compose
    err "Failed to compile base 'docker-compose.yml'"
    exit 1
}


##
## Pre-action
##


full_init=
case "$action" in
    build)
        full_init=true  ## will actually stop after build
        ;;
    up|run)
        full_init=true
        post_hook=true
        ;;
    ""|down|restart|logs|config|ps|status)
        full_init=
        ;;
    *)
        if [ "$is_service_action" ]; then
            full_init=true
            keywords=($(egrep "^#*\s*compose:" "$action_script_path" | cut -f 2- -d:))
            for keyword in "${keywords[@]}"; do
                case "$keyword" in
                    no-hooks)
                        no_hooks=true
                        ;;
                    hooks)
                        full_init=true
                        ;;
                esac
            done
        fi
        ;;
esac
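
## Illustrative: a charm action script can tune this behaviour through a
## header comment picked up by the egrep above, e.g.
##     #compose: no-hooks
## to skip init/relation hooks, or '#compose: hooks' to force the full init.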

if [ -n "$full_init" ]; then
    if [[ "$action" == "build" ]] || [[ -z "$no_init" && -z "$no_hooks" ]]; then
        [[ "$action" == "build" ]] || Section "acquire charm's images"
        run_service_acquire_images "${services_args[@]}" || exit 1
        Feed
        [ "$action" == "build" ] && {
            exit 0
        }
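        ## For 'build', acquiring/building the charm images is the whole job
        ## (full_init was only set for that), so the script stops right here.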

        Section setup host resources
        setup_host_resources "${services_args[@]}" || exit 1
        ## init in order
        Section initialisation
        run_service_hook init "${services_args[@]}" || exit 1
    fi
    ## Get relations
    if [[ -z "$no_relations" && -z "$no_hooks" ]]; then
        if [ "${#rebuild_relations_to_service[@]}" != 0 ]; then
            rebuild_relations_to_service=$(get_master_services "${rebuild_relations_to_service[@]}") || exit 1
            rebuild_relations_to_service=($rebuild_relations_to_service)
            project=$(get_default_project_name) || exit 1
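            ## Dropping the cached relation data of relations pointing to
            ## these services forces the corresponding relation hooks to be
            ## replayed by run_service_relations below (descriptive note).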
            for service in "${rebuild_relations_to_service[@]}"; do
                for dir in "$VARDIR/relations/$project/"*"-${service}/"*; do
                    [ -d "$dir" ] && {
                        debug rm -rf "$dir"
                        rm -rf "$dir"
                    }
                done
            done
        fi
        run_service_relations "${services_args[@]}" || exit 1
    fi

    if [[ -z "$no_pre_deploy" && -z "$no_hooks" ]]; then
        run_service_hook pre_deploy "${services_args[@]}" || exit 1
    fi

fi | log
if [ "${PIPESTATUS[0]}" != 0 ]; then
    exit 1
fi
[ "$action" == "build" ] && exit 0

if [ "$action" == "status" ]; then
    if [[ -n "${state_all_services}" ]] || [[ "${#state_filters[@]}" -gt 0 ]]; then
        compose_yml_services=($(compose:yml:root:services)) || exit 1
    fi
    if [[ -n "${state_all_services}" ]]; then
        state_columns=("root" "${state_columns[@]}")
    fi

    state_columns_raw=()
    for col in "${state_columns[@]}"; do
        if [[ "$col" =~ ^[+-] ]]; then
            col=${col:1}
        fi
        state_columns_raw+=("${col//-/_}")
    done
    state_columns_align=""
    for col in "${state_columns[@]}"; do
        if [[ "$col" == "-"* ]]; then
            state_columns_align+="-"
        elif [[ "$col" == "+"* ]]; then
            state_columns_align+="+"
        else
            case "${col//_/-}" in
                version|upstream-version) state_columns_align+="+";;
                *) state_columns_align+="-";;
            esac
        fi
    done
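    ## Alignment, as computed above: a leading '-' on a requested column
    ## means left-aligned, '+' means right-aligned; without a prefix,
    ## 'version' and 'upstream-version' default to right alignment and
    ## everything else to left, e.g. (illustrative): compose status -c name,+state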

    while read-0-err E "${state_columns_raw[@]}"; do
        values=()
        for col in "${state_columns_raw[@]}"; do
            color=
            value="${!col}"
            read -r -- value_trim <<<"${!col}"
            case "${col//_/-}" in
                root)
                    case "$value_trim" in
                        0) value=" ";;
                        1) value="*";;
                    esac
                    ;;
                name) color=darkyellow;;
                charm) color=darkpink;;
                state)
                    case "$value_trim" in
                        up) color=green;;
                        down) color=gray;;
                        deploying) color=yellow;;
                        *) color=red;;
                    esac
                    ;;
                type)
                    case "$value_trim" in
                        run-once) color=gray;;
                        stub) color=gray;;
                        *) color=darkcyan;;
                    esac
                    ;;
                *)
                    if [[ "${value_trim}" == "N/A" ]]; then
                        color=gray
                    fi
                    ;;
            esac
            color="${color^^}"
            if [ -n "$color" ]; then
                values+=("${!color}$value${NORMAL}")
            else
                values+=("$value")
            fi
        done
        first=1
        for value in "${values[@]}"; do
            if [ -n "$first" ]; then
                first=
            else
                printf " "
            fi
            printf "%s" "$value"
        done
        printf "\n"
    done < <(
        set -o pipefail
        filter_cols=()
        for filter in "${state_filters[@]}"; do
            IFS="=" read -r key value <<<"$filter"
            ## if not already in state_columns_raw
            [[ " ${state_columns_raw[*]} " == *" $key "* ]] ||
                filter_cols+=("${key//-/_}")
        done
        for service in "${services_args[@]}"; do
            declare -A values=()
            for col in "${state_columns_raw[@]}" "${filter_cols[@]}"; do
                case "${col//_/-}" in
                    root)
                        if [[ " ${compose_yml_services[*]} " == *" ${service} "* ]]; then
                            value="1"
                        else
                            value="0"
                        fi
                        ;;
                    name) value="$service" ;;
                    charm)
                        value=$(get_service_charm "$service") || { echo 1; exit 1; }
                        ;;
                    state)
                        value=$(service:state "$service") || { echo 1; exit 1; }
                        ;;
                    type)
                        value=$(get_service_type "$service") || { echo 1; exit 1; }
                        ;;
                    upstream-version)
                        value=$(service:upstream-version "$service") || { echo 1; exit 1; }
                        value=${value:-N/A}
                        ;;
                    *)
                        if has_service_action "$service" "get-$col" >/dev/null; then
                            state_msg=$(run_service_action "$service" "get-$col") || { echo 1; exit 1 ; }
                            if [[ "$state_msg" == *$'\n'* ]]; then
                                value="${state_msg%%$'\n'*}"
                                ## XXXvlab: For now, these are not used, but we could
                                ## display them in additional lines (in same "cell")
                                msgs="${state_msg#*$'\n'}"
                            else
                                value=${state_msg}
                            fi
                        else
                            value="N/A"
                        fi
                        ;;
                esac
                values["$col"]="$value"
            done
            for filter in "${state_filters[@]}"; do
                IFS="=" read -r key value <<<"$filter"
                [[ "${values[$key]}" != "$value" ]] &&
                    continue 2
            done
            for col in "${state_columns_raw[@]}"; do
                p0 "${values[$col]}"
            done
        done | col-0:normalize:size "${state_columns_align}"
        echo 0
    )
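    ## The subshell above streams NUL-separated cell values through
    ## col-0:normalize:size and finally emits its own error level ('0' on
    ## success, '1' on any failure), which read-0-err exposes as "$E" below.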
    if [ "$E" != 0 ]; then
        echo "E: '$E'" >&2
        exit 1
    fi
    exit 0
fi


if [ "$action" == "run" ] && [ "${#services_args[@]}" != 0 ]; then
    charm=$(get_service_charm "${services_args[0]}") || exit 1
    metadata=$(charm.metadata "$charm") || exit 1
    SERVICE_TYPE="$(printf "%s" "$metadata" | shyaml get-value type 2>/dev/null)" || true
    if [ "$SERVICE_TYPE" == "run-once" ]; then
        run_service_hook dc-pre-run "${services_args[@]}" || exit 1
    fi
fi
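
## Descriptive note: a charm whose metadata declares 'type: run-once' gets the
## extra dc-pre-run hook here, before 'docker-compose run', and the matching
## dc-post-run hook after the action (see further below).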

export SERVICE_PACK="${services_args[*]}"

##
## Docker-compose
##

errlvl="0"
case "$action" in
    up|start|stop|build|run)
        ## force daemon mode for up
        if [[ "$action" == "up" ]]; then
            if ! array_member action_opts -d; then
                action_opts+=("-d")
            fi
            if ! array_member action_opts --remove-orphans; then
                action_opts+=("--remove-orphans")
            fi
        fi
        launch_docker_compose "${compose_opts[@]}" "$action" "${action_opts[@]}" "${action_posargs[@]}" "${remainder_args[@]}"
        ;;
    logs)
        if ! array_member action_opts --tail; then  ## limit the log backlog by default
            action_opts+=("--tail" "10")
        fi
        launch_docker_compose "${compose_opts[@]}" "$action" "${action_opts[@]}" "${action_posargs[@]}" "${remainder_args[@]}"
        ;;
    "")
        launch_docker_compose "${compose_opts[@]}"
        ;;
    graph)
        graph $SERVICE_PACK
        ;;
    config)
        ## remove subordinate services (keep only masters)
        services=($(get_master_services "${action_posargs[@]}")) || exit 1
        ## forcing docker-compose config to output the config file to stdout and not stderr
        out=$(launch_docker_compose "${compose_opts[@]}" "$action" "${action_opts[@]}" "${remainder_args[@]}" 2>&1) || {
            echo "$out"
            exit 1
        }
        echo "$out"
        warn "Runtime configuration modifications (from relations) are not included here."
        ;;
    down)
        if ! array_member action_opts --remove-orphans; then  ## remove orphans by default on down
            debug "Adding a default argument of '--remove-orphans'"
            action_opts+=("--remove-orphans")
        fi
        launch_docker_compose "${compose_opts[@]}" "$action" "${action_opts[@]}" "${remainder_args[@]}"
        ;;
    *)
        if [ "$is_service_action" ]; then
            run_service_action "$action_service" "$action" "${remainder_args[@]}"
            errlvl="$?"
            errlvl "$errlvl"
        else
            launch_docker_compose "${compose_opts[@]}" "$action" "${action_opts[@]}" "${action_posargs[@]}" "${remainder_args[@]}"
        fi
        ;;
esac || exit 1


if [ "$post_hook" -a "${#services_args[@]}" != 0 -a -z "$no_hooks" -a -z "$no_post_deploy" ]; then
    run_service_hook post_deploy "${services_args[@]}" || exit 1
fi

if [ "$action" == "run" -a "${#services_args[@]}" != 0 ]; then
    if [ "$SERVICE_TYPE" == "run-once" ]; then
        run_service_hook dc-post-run "${services_args[@]}" || exit 1
    fi
fi

case "$action" in
    up)
        ## Notify that services in 'deploying' states have been deployed
        for service in "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/deploying; do
            [ -e "$service" ] || continue
            state=${service##*/}
            service=${service%/$state}
            service=${service##*/}

            mv "$SERVICE_STATE_PATH/$PROJECT_NAME"/"${service}"/{deploying,up} || exit 1
        done
        ## Notify that services in 'orphaning' states have been removed
        for service in "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/orphaning; do
            [ -e "$service" ] || continue
            state=${service##*/}
            service=${service%/$state}
            service=${service##*/}

            rm "$SERVICE_STATE_PATH/$PROJECT_NAME"/"${service}"/orphaning || exit 1
        done
        ;;
    down)
        PROJECT_NAME=$(get_default_project_name) || exit 1
        if [ -d "$SERVICE_STATE_PATH/$PROJECT_NAME" ]; then
            if ! dir_is_empty "$SERVICE_STATE_PATH/$PROJECT_NAME"; then
                rm -f "$SERVICE_STATE_PATH/$PROJECT_NAME"/*/*
            fi
            rmdir "$SERVICE_STATE_PATH/$PROJECT_NAME"/{*,}
        fi
        ;;
esac

clean_unused_docker_compose || exit 1

exit "$errlvl"