You can not select more than 25 topics
Topics must start with a letter or number, can include dashes ('-') and can be up to 35 characters long.
5114 lines
171 KiB
5114 lines
171 KiB
#!/bin/bash
|
|
|
|
##
|
|
## TODO:
|
|
## - subordinate container should really be able to modify base image of their master
|
|
## - this could be done through docker-update
|
|
## - I'm not happy with the current build using 'build/' directory, this should be
|
|
## changed to:
|
|
## - always have a base image (specified in metadata), and always have hooks/install
|
|
## executed and merge in image (like docker-build-charm).
|
|
## - container base image is ALWAYS the image of the master container... this brings
|
|
## questions about a double way to express inheritage (through relations as it is
|
|
## implemented now, or through this base-image ?)
|
|
## - the name of the scripts for relation (aka relation_name-relation-joined) is bad as
|
|
## reading the name in a hooks/ dir, there are no way to know if we are the target or
|
|
## the base of the relation.
|
|
## - we could leverage a 'relations/' dir on the root of the charm, with both:
|
|
## 'relations/provide/relation_name' and 'relations/receive/relation_name'
|
|
## - a very bad point with the actual naming is that we can't have a providing AND
|
|
## receiving a relation with same name.
|
|
## - The cache system should keep md5 of docker-compose and other things between runs
|
|
## - The cache system should use underlying function that have only arguments inputs.
|
|
## This will allow to cache completely without issues function in time.
|
|
## - would probably need instrospection in charm custom action to know if these need
|
|
## init or relations to be set up.
|
|
## - Be clear about when the SERVICE name is used and the CHARM name is used.
|
|
## - in case of service contained in another container
|
|
## - in normal case
|
|
## - in docker-compose, can't use charm name: if we want 2 instances of the same charm
|
|
## we are stuck. What will be unique is the name of the service.
|
|
## - some relations are configured in compose.yml but should not trigger the loading
|
|
## of necessary component (for instance, apache --> log-rotate), if log-rotate is
|
|
## not there, this link should considered optional.
|
|
## - Could probably allow an unexistent charm to be populated with only "docker-image:"
|
|
## of the same name. Although this should trigger a visible warning.
|
|
|
|
|
|
#:-
## Bootstrap: this script requires the kal-shlib-core shell library
## (provides 'include', 'depends', logging helpers, ...) installed
## system-wide at /etc/shlib.
[ -e /etc/shlib ] && . /etc/shlib || {
    echo "Unsatisfied dependency. Please install 'kal-shlib-core'."
    exit 1
}
#:-
|
|
|
|
include common
|
|
include pretty
|
|
include parse
|
|
include charm
|
|
include array
|
|
include cla
|
|
include docker
|
|
|
|
depends shyaml docker
|
|
|
|
## Identity and CLI metadata consumed by the shlib 'cla' argument parser.
exname="compose"
version=0.1
usage="$exname [COMPOSE_OPTS] [ACTION [ACTION_OPTS]]"
## User-facing help text; color variables come from 'include pretty'.
help="\

$WHITE$exname$NORMAL jobs is to run various shell scripts to build
a running orchestrated and configured docker containers. These shell
scripts will have the opportunity to build a 'docker-compose.yml'.

Once init script and relations scripts are executed, $WHITE$exname$NORMAL
delegate the launching to ${WHITE}docker-compose${NORMAL} by providing it
the final 'docker-compose.yml'.

$WHITE$exname$NORMAL also leverage charms to offer some additional custom
actions per charm, which are simply other scripts that can be
run without launching ${WHITE}docker-compose${NORMAL}.

In compose message, color coding is enforced as such:
- ${DARKCYAN}action$NORMAL,
- ${DARKBLUE}relation$NORMAL,
- ${DARKPINK}charm${NORMAL},
- ${DARKYELLOW}service${NORMAL},
- ${WHITE}option-name${NORMAL}/${WHITE}command-name${NORMAL}/${WHITE}Section-Title${NORMAL}

$WHITE$exname$NORMAL reads '/etc/compose.conf' for global variables, and
'/etc/compose.local.conf' for local host adjustements.

"
|
|
|
|
|
|
## XXXvlab: this doesn't seem to work when 'compose' is called in
## a hook of a charm.
#[[ "${BASH_SOURCE[0]}" == "" ]] && SOURCED=true
## Detect whether this file is being sourced rather than executed:
## 'return' only succeeds at top level when the file is sourced.
$(return >/dev/null 2>&1) && SOURCED=true
|
|
|
|
## errlvl [CODE]
## Set $? to CODE (default 1); convenient for forcing an errorlevel.
errlvl() {
    return "${1:-1}"
}
export -f errlvl
|
|
|
|
## Writable state locations: root uses system-wide paths, other users
## fall back to XDG dirs when set, else ~/.cache and ~/.local/share.
if [ "$UID" == 0 ]; then
    CACHEDIR=${CACHEDIR:-/var/cache/compose}
    VARDIR=${VARDIR:-/var/lib/compose}
else
    [ "$XDG_CONFIG_HOME" ] && CACHEDIR=${CACHEDIR:-$XDG_CONFIG_HOME/compose}
    [ "$XDG_DATA_HOME" ] && VARDIR=${VARDIR:-$XDG_DATA_HOME/compose}
    CACHEDIR=${CACHEDIR:-$HOME/.cache/compose}
    VARDIR=${VARDIR:-$HOME/.local/share/compose}
fi
export VARDIR CACHEDIR
|
|
|
|
|
|
md5_compat() { md5sum | cut -c -32; }
|
|
quick_cat_file() { quick_cat_stdin < "$1"; }
|
|
quick_cat_stdin() { local IFS=''; while read -r line; do echo "$line"; done ; }
|
|
export -f quick_cat_file quick_cat_stdin md5_compat
|
|
|
|
|
|
## read-0-err RETVAR VAR...
## Read NUL-terminated fields from stdin into each VAR, expecting the
## stream to end with one *non*-NUL-terminated record carrying the
## producer's exit status (see 'p-err'), which is stored in RETVAR.
## RETVAR is set to 127 when the stream ends before all VARs are
## filled, 126 when the trailing status record is empty; in both cases
## the function itself returns non-zero.
read-0-err() {
    local ret="$1" eof="" idx=0 last=
    ## Initialize ${!ret} to "0" without eval (read from a herestring).
    read -r -- "${ret?}" <<<"0"
    shift
    while [ "$1" ]; do
        last=$idx
        read -r -d '' -- "$1" || {
            ## Put this last value in ${!ret}
            eof="$1"
            read -r -- "$ret" <<<"${!eof}"
            break
        }
        ((idx++))
        shift
    done
    [ -z "$eof" ] || {
        if [ "$last" != 0 ]; then
            echo "Error: read-0-err couldn't fill all value" >&2
            read -r -- "$ret" <<<"127"
        else
            if [ -z "${!ret}" ]; then
                echo "Error: last value is not a number, did you finish with an errorlevel ?" >&2
                read -r -- "$ret" <<<"126"
            fi
        fi
        false
    }
}
export -f read-0-err
|
|
|
|
## p-err CMD ARGS...
## Run CMD, then print its exit status on stdout; pairs with
## 'read-0-err' so output and errorlevel travel on the same stream.
p-err() {
    "$@"
    local status=$?
    echo "$status"
}
export -f p-err
|
|
|
|
|
|
## wyq EXPRESSION
## Run 'yq' on stdin with EXPRESSION (NUL-separated output, -e errors
## on null results), then print yq's errorlevel for 'read-0-err'.
wyq() {
    local exp="$1"
    ## was: yq e -e -0 "$1" — use the declared local instead of the
    ## raw positional so the two stay in sync.
    yq e -e -0 "$exp"
    printf "%s" "$?"
}
|
|
|
|
|
|
## wyq-r EXPRESSION
## Same as wyq but with -r=false (keep YAML formatting of results).
wyq-r() {
    local exp="$1"
    ## was: yq e -e -0 -r=false "$1" — use the declared local.
    yq e -e -0 -r=false "$exp"
    printf "%s" "$?"
}
|
|
|
|
|
|
## clean_cache
## Trim $CACHEDIR to its 499 most recently modified '*.cache.*' files
## (mtime order via 'ls -t'); report how many entries were removed.
clean_cache() {
    local i=0 f
    ## Read line-wise instead of word-splitting $(ls ...): survives
    ## blanks in $CACHEDIR (cache basenames themselves are md5-based).
    while IFS= read -r f; do
        ((i++))
        rm -f "$f"
    done < <(ls -t "$CACHEDIR/"*.cache.* 2>/dev/null | tail -n +500)
    if (( i > 0 )); then
        debug "${WHITE}Cleaned cache:${NORMAL} Removed $((i)) elements (current cache size is $(du -sh "$CACHEDIR" | cut -f 1))"
    fi
}
|
|
|
|
|
|
export DEFAULT_COMPOSE_FILE
|
|
|
|
##
|
|
## Merge YAML files
|
|
##
|
|
|
|
export _merge_yaml_common_code="
|
|
|
|
import sys
|
|
import yaml
|
|
|
|
try:
|
|
from yaml import CSafeLoader as SafeLoader, CSafeDumper as SafeDumper
|
|
except ImportError: ## pragma: no cover
|
|
sys.stderr.write('YAML code in pure python\n')
|
|
exit(1)
|
|
from yaml import SafeLoader, SafeDumper
|
|
|
|
class MySafeLoader(SafeLoader): pass
|
|
class MySafeDumper(SafeDumper): pass
|
|
|
|
|
|
try:
|
|
# included in standard lib from Python 2.7
|
|
from collections import OrderedDict
|
|
except ImportError:
|
|
# try importing the backported drop-in replacement
|
|
# it's available on PyPI
|
|
from ordereddict import OrderedDict
|
|
|
|
|
|
## Ensure that there are no collision with legacy OrderedDict
|
|
## that could be used for omap for instance.
|
|
class MyOrderedDict(OrderedDict):
|
|
pass
|
|
|
|
MySafeDumper.add_representer(
|
|
MyOrderedDict,
|
|
lambda cls, data: cls.represent_dict(data.items()))
|
|
|
|
|
|
def construct_omap(cls, node):
|
|
cls.flatten_mapping(node)
|
|
return MyOrderedDict(cls.construct_pairs(node))
|
|
|
|
|
|
MySafeLoader.add_constructor(
|
|
yaml.resolver.BaseResolver.DEFAULT_MAPPING_TAG,
|
|
construct_omap)
|
|
|
|
|
|
##
|
|
## Support local and global objects
|
|
##
|
|
|
|
class EncapsulatedNode(object): pass
|
|
|
|
|
|
def mk_encapsulated_node(s, node):
|
|
|
|
method = 'construct_%s' % (node.id, )
|
|
data = getattr(s, method)(node)
|
|
|
|
class _E(data.__class__, EncapsulatedNode):
|
|
pass
|
|
|
|
_E.__name__ = str(node.tag)
|
|
_E._node = node
|
|
return _E(data)
|
|
|
|
|
|
def represent_encapsulated_node(s, o):
|
|
value = s.represent_data(o.__class__.__bases__[0](o))
|
|
value.tag = o.__class__.__name__
|
|
return value
|
|
|
|
|
|
MySafeDumper.add_multi_representer(EncapsulatedNode,
|
|
represent_encapsulated_node)
|
|
MySafeLoader.add_constructor(None, mk_encapsulated_node)
|
|
|
|
|
|
def fc(filename):
|
|
with open(filename) as f:
|
|
return f.read()
|
|
|
|
def merge(*args):
|
|
# sys.stderr.write('%r\n' % (args, ))
|
|
args = [arg for arg in args if arg is not None]
|
|
if len(args) == 0:
|
|
return None
|
|
if len(args) == 1:
|
|
return args[0]
|
|
if all(isinstance(arg, (int, basestring, bool, float)) for arg in args):
|
|
return args[-1]
|
|
elif all(isinstance(arg, list) for arg in args):
|
|
res = []
|
|
for arg in args:
|
|
for elt in arg:
|
|
if elt in res:
|
|
res.remove(elt)
|
|
res.append(elt)
|
|
return res
|
|
elif all(isinstance(arg, dict) for arg in args):
|
|
keys = set()
|
|
for arg in args:
|
|
keys |= set(arg.keys())
|
|
dct = {}
|
|
for key in keys:
|
|
sub_args = []
|
|
for arg in args:
|
|
if key in arg:
|
|
sub_args.append(arg)
|
|
try:
|
|
dct[key] = merge(*(a[key] for a in sub_args))
|
|
except NotImplementedError as e:
|
|
raise NotImplementedError(
|
|
e.args[0],
|
|
'%s.%s' % (key, e.args[1]) if e.args[1] else key,
|
|
e.args[2])
|
|
if dct[key] is None:
|
|
del dct[key]
|
|
return dct
|
|
else:
|
|
raise NotImplementedError(
|
|
'Unsupported types: %s'
|
|
% (', '.join(list(set(arg.__class__.__name__ for arg in args)))), '', args)
|
|
return None
|
|
|
|
def merge_cli(*args):
|
|
try:
|
|
c = merge(*args)
|
|
except NotImplementedError as e:
|
|
sys.stderr.write('Merging Failed: %s.\n%s\n'
|
|
' Values are:\n %s\n'
|
|
% (e.args[0],
|
|
' Conflicting key is %r.' % e.args[1] if e.args[1] else
|
|
' Conflict at base of structure.',
|
|
'\\n '.join('v%d: %r' % (i, a)
|
|
for i, a in enumerate(e.args[2]))))
|
|
exit(1)
|
|
if c is not None:
|
|
print '%s' % yaml.dump(c, default_flow_style=False, Dumper=MySafeDumper)
|
|
|
|
"
|
|
|
|
|
|
## merge_yaml FILE...
## Deep-merge the given YAML files (see _merge_yaml_common_code for the
## merge semantics) and print the merged document on stdout.  The
## python helper script is generated once per run in $state_tmpdir.
merge_yaml() {

    if ! [ -r "$state_tmpdir/merge_yaml.py" ]; then
        cat <<EOF > "$state_tmpdir/merge_yaml.py"

$_merge_yaml_common_code

merge_cli(*(yaml.load(fc(f), Loader=MySafeLoader) for f in sys.argv[1:]))
EOF
    fi

    python "$state_tmpdir/merge_yaml.py" "$@"
}
export -f merge_yaml
|
|
|
|
|
|
## merge_yaml_str YAML_STRING...
## Same as merge_yaml but the YAML documents are passed as arguments.
## On failure, each input document is dumped on stderr for diagnosis.
merge_yaml_str() {
    ## (dropped unused 'local entries="$@"' — "$@" in a scalar
    ## assignment was a latent quoting mistake anyway)
    if ! [ -r "$state_tmpdir/merge_yaml_str.py" ]; then
        cat <<EOF > "$state_tmpdir/merge_yaml_str.py" || return 1

$_merge_yaml_common_code

merge_cli(*(yaml.load(f, Loader=MySafeLoader) for f in sys.argv[1:]))
EOF
    fi

    if ! python "$state_tmpdir/merge_yaml_str.py" "$@"; then
        err "Failed to merge yaml strings:"
        local s
        for s in "$@"; do
            printf " - \n"
            printf "%s\n" "$s" | prefix " ${GRAY}|$NORMAL "
        done >&2
        return 1
    fi
}
export -f merge_yaml_str
|
|
|
|
|
|
## yaml_get_values [SEP]
## Read a YAML document on stdin and print its values flattened,
## separated by SEP (default: newline).  Sequences are interpreted
## element-wise (empty elements skipped), structs value-wise; scalars
## go through yaml_get_interpret.
yaml_get_values() {
    local sep=${1:-$'\n'} value input type first elt val
    input=$(cat -)
    ## Anchor both alternatives: the previous '^None|null$' also
    ## matched e.g. 'None-foo' (prefix) or 'foo-null' (suffix).
    if [ -z "$input" ] || [[ "$input" =~ ^(None|null)$ ]]; then
        return 0
    fi
    type=$(e "$input" | shyaml get-type)
    value=
    case "$type" in
        "sequence")
            first=1
            while read-0 elt; do
                elt="$(e "$elt" | yaml_get_interpret)" || return 1
                [ "$elt" ] || continue
                if [ "$first" ]; then
                    first=
                else
                    value+="$sep"
                fi
                first=
                value+="$elt"
            done < <(e "$input" | shyaml -y get-values-0)
            ;;
        "struct")
            while read-0 val; do
                value+=$'\n'"$(e "$val" | yaml_get_interpret)" || return 1
            done < <(e "$input" | shyaml -y values-0)
            ;;
        "NoneType")
            value=""
            ;;
        "str"|*)
            value+="$(e "$input" | yaml_get_interpret)"
            ;;
    esac
    e "$value"
}
export -f yaml_get_values
|
|
|
|
|
|
## yaml_key_val_str KEY_YAML VALUE_YAML
## Print a one-entry YAML mapping {KEY: VALUE}; both arguments are
## parsed as YAML before being dumped back.
yaml_key_val_str() {
    ## (dropped unused 'local entries="$@"' — same dead-code pattern
    ## as merge_yaml_str)
    if ! [ -r "$state_tmpdir/yaml_key_val_str.py" ]; then
        cat <<EOF > "$state_tmpdir/yaml_key_val_str.py"

$_merge_yaml_common_code

print '%s' % yaml.dump(
    {
        yaml.load(sys.argv[1], Loader=MySafeLoader):
            yaml.load(sys.argv[2], Loader=MySafeLoader)
    },
    default_flow_style=False,
    Dumper=MySafeDumper,
)

EOF
    fi

    python "$state_tmpdir/yaml_key_val_str.py" "$@"
}
export -f yaml_key_val_str
|
|
|
|
|
|
##
|
|
## Docker
|
|
##
|
|
|
|
## docker_has_image IMAGE
## Succeed iff IMAGE exists locally according to 'docker images -q'.
docker_has_image() {
    local image="$1" images   ## 'images' was leaking into the caller
    images=$(docker images -q "$image" 2>/dev/null) || {
        err "docker images call has failed unexpectedly."
        return 1
    }
    [ -n "$images" ]
}
export -f docker_has_image
|
|
|
|
|
|
## docker_image_id IMAGE
## Print the full content-addressed Id of IMAGE.
docker_image_id() {
    local image="$1" image_id   ## 'image_id' was leaking into the caller
    image_id=$(docker inspect "$image" --format='{{.Id}}') || return 1
    echo "$image_id" # | tee "$cache_file"
}
export -f docker_image_id
|
|
|
|
|
|
## cached_cmd_on_image IMAGE CMD...
## Run CMD (via '/bin/sh -c') in a throw-away container of IMAGE and
## cache stdout in $CACHEDIR, keyed on md5 of all arguments.
## Warning: the key uses the IMAGE *name*, not the image id, so a
## re-tagged image can serve stale cache entries.
cached_cmd_on_image() {
    local image="$1" cache_file image_id out   ## image_id/out were leaking
    image_id=$(docker_image_id "$image") || return 1
    cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        quick_cat_stdin < "$cache_file"
        return 0
    fi
    shift
    out=$(docker run -i --rm --entrypoint /bin/sh "$image_id" -c "$*") || return 1
    echo "$out" | tee "$cache_file"
}
export -f cached_cmd_on_image
|
|
|
|
|
|
## cmd_on_base_image SERVICE CMD...
## Run CMD with bash inside SERVICE's base image (building the image
## first if needed).
cmd_on_base_image() {
    local service="$1"
    shift
    local base_image
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker run -i --rm --entrypoint /bin/bash "$base_image" -c "$*"
}
export -f cmd_on_base_image
|
|
|
|
|
|
## cached_cmd_on_base_image SERVICE CMD...
## Like cached_cmd_on_image but resolves SERVICE's base image first,
## adding a per-run cache layer in $state_tmpdir keyed on all args.
cached_cmd_on_base_image() {
    local service="$1" base_image result \
          cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)"
    ## ('result' was leaking into the caller)
    shift
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        quick_cat_stdin < "$cache_file"
        return 0
    fi
    base_image=$(service_ensure_image_ready "$service") || return 1
    result=$(cached_cmd_on_image "$base_image" "$@") || return 1
    echo "$result" | tee "$cache_file"
}
export -f cached_cmd_on_base_image
|
|
|
|
|
|
## docker_update SERVICE SCRIPT [DUPD_ARGS...]
## Apply SCRIPT (piped into 'dupd') onto SERVICE's base image and
## re-tag the result under the same name.  A cache file remembers the
## pre-update image id: re-running on an unchanged image first resets
## the tag to the original image, making the update idempotent.
## YYY: warning, we a storing important information in cache, cache can
## be removed.
docker_update() {
    ## We want here to cache the last script on given service whatever that script was
    local service="$1" script="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$1" \
          previous_base_image stored_image_id
    shift
    shift
    ## this will build it if necessary
    base_image=$(service_ensure_image_ready "$service") || return 1

    ## XXXvlab: there are probably ways to avoid rebuilding that each time
    image_id="$(docker_image_id "$base_image")" || return 1

    ## Cache holds two NUL-terminated fields: pre-update image id and
    ## post-update image id of the previous run.
    if [ -e "$cache_file" ]; then
        info "Cache file exists"
        read-0 previous_base_image stored_image_id < <(cat "$cache_file")
        info "previous: $previous_base_image"
        info "stored: $stored_image_id"
    else
        info "No cache file $cache_file"
        previous_base_image=""
    fi
    ## If current tag still points at our own previous update, rewind
    ## to the original image before re-applying the script.
    if [ "$previous_base_image" -a "$stored_image_id" == "$image_id" ]; then
        info "Resetting $base_image to $previous_base_image"
        docker tag "$previous_base_image" "$base_image" || return 1
        image_id="$(docker_image_id "$base_image")" || return 1
    else
        previous_base_image="$image_id"
    fi
    info "Updating base image: $base_image (hash: $image_id)"
    echo "$script" | dupd --debug -u "$base_image" -- "$@" || {
        err "Failed updating base image"
        return 1
    }
    new_image_id="$(docker_image_id "$base_image")"
    [ "$new_image_id" == "$previous_base_image" ] && {
        err "Image was not updated correctly (same id)."
        return 1
    }
    printf "%s\0" "$previous_base_image" "$new_image_id" > "$cache_file"
    info "Wrote cache file $cache_file"
}
export -f docker_update
|
|
|
|
|
|
## image_exposed_ports_0 IMAGE
## Print IMAGE's exposed ports (e.g. "5432/tcp"), NUL-separated.
image_exposed_ports_0() {
    local image="$1"
    docker inspect --format='{{range $p, $conf := .Config.ExposedPorts}}{{$p}}{{"\x00"}}{{end}}' "$image"
}
export -f image_exposed_ports_0
|
|
|
|
|
|
## feature not yet included in docker: https://github.com/moby/moby/issues/16079
## docker_image_export_dir IMAGE SRC DST
## Copy path SRC out of IMAGE into host path DST by way of a created
## (never started) container; the container is removed when the
## subshell exits.
docker_image_export_dir() {
    local image="$1" src="$2" dst="$3" container_id
    (
        container_id=$(docker create "$image") || exit 1
        trap_add EXIT,ERR "docker rm \"$container_id\" >/dev/null"
        docker cp "$container_id":"$src" "$dst"
    )
}
export -f docker_image_export_dir
|
|
|
|
|
|
## service_base_image_export_dir SERVICE SRC DST
## Export path SRC of SERVICE's base image to host path DST.
service_base_image_export_dir() {
    local service="$1" src="$2" dst="$3" base_image
    shift
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker_image_export_dir "$base_image" "$src" "$dst"
}
export -f service_base_image_export_dir
|
|
|
|
|
|
## service_base_image_id SERVICE
## Print the docker image Id of SERVICE's (possibly freshly built)
## base image.
service_base_image_id() {
    ## (dropped dead locals 'src'/'dst' and the useless 'shift',
    ## copied over from service_base_image_export_dir)
    local service="$1" base_image
    base_image=$(service_ensure_image_ready "$service") || return 1
    docker inspect "$base_image" --format="{{ .Id }}"
}
export -f service_base_image_id
|
|
|
|
|
|
##
|
|
## Generic
|
|
##
|
|
|
|
|
|
## fn.exists NAME
## Succeed iff NAME is a currently defined shell function.
fn.exists() {
    declare -F "$1" >/dev/null
}
|
|
|
|
|
|
## str_pattern_matches STR PATTERN...
## Succeed if STR matches any of the given glob PATTERNs.
## An unquoted RHS in [[ == ]] performs pattern matching natively, so
## the previous 'eval' (a command-injection hazard if a pattern ever
## carries untrusted text) is unnecessary.
str_pattern_matches() {
    local str="$1" pattern   ## 'pattern' was leaking into the caller
    shift
    for pattern in "$@"; do
        [[ "$str" == $pattern ]] && return 0
    done
    return 1
}
|
|
|
|
|
|
## str_matches STR WORD...
## Succeed if STR is literally equal to any WORD (no glob expansion:
## the RHS is quoted).
str_matches() {
    local str="$1"
    shift
    for pattern in "$@"; do
        if [[ "$str" == "$pattern" ]]; then
            return 0
        fi
    done
    return 1
}
|
|
|
|
## gen_password [SIZE]
## Print a random alphanumeric string of SIZE chars (default 16).
## Based on $RANDOM — NOT cryptographically secure.
gen_password() {
    local alphabet=( {a..z} {A..Z} {0..9} )
    local nchars="${#alphabet[@]}" size=${1:-16} out=""
    while ((size--)); do
        out+="${alphabet[$((RANDOM * nchars / 32768))]}"
    done
    echo "$out"
}
export -f gen_password
|
|
|
|
|
|
## file_put TARGET
## Write stdin to TARGET, creating parent directories as needed.
file_put() {
    local TARGET="$1"
    mkdir -p "$(dirname "$TARGET")" || return
    cat - > "$TARGET"
}
export -f file_put
|
|
|
|
|
|
## file_put_0 TARGET
## Write stdin to TARGET (parents auto-created); variant of file_put
## historically used for NUL-terminated payloads.
file_put_0() {
    local TARGET="$1"
    mkdir -p "$(dirname "$TARGET")" || return
    cat > "$TARGET"
}
export -f file_put_0
|
|
|
|
|
|
## fetch_file SRC
## Dump SRC's content on stdout.  Only local readable paths are
## supported for now; URL-like sources are rejected.
fetch_file() {
    local src="$1"

    case "$src" in
        *"://"*)
            err "Unsupported target scheme."
            return 1
            ;;
        *)
            ## Try direct
            [ -r "$src" ] || {
                err "File '$src' not found/readable."
                return 1
            }
            cat "$src" || return 1
            ;;
    esac
}
export -f fetch_file
|
|
|
|
|
|
## receives stdin content to decompress on stdout
## stdout content should be tar format.
## The filename argument is only a *hint* to pick the decompressor;
## the data itself always arrives on stdin.
uncompress_file() {
    local filename="$1"

    case "$filename" in
        *".gz")
            gunzip
            ;;
        *".bz2")
            bunzip2
            ;;
        *)
            ## unknown extension: pass through untouched
            cat
            ;;
    esac
}
export -f uncompress_file
|
|
|
|
|
|
## get_file SRC
## Fetch SRC and transparently decompress it based on its extension.
get_file() {
    local src="$1"
    fetch_file "$src" | uncompress_file "$src"
}
export -f get_file
|
|
|
|
|
|
##
|
|
## Common database lib
|
|
##
|
|
|
|
## _clean_docker DB_NAME CONTAINER_ID
## Best-effort teardown of the throw-away database docker: stop the
## container, remove it, its dedicated network and the state file.
## Runs under 'set +e' in a subshell so it never fails; all output is
## redirected to stderr.
_clean_docker() {
    local _DB_NAME="$1" container_id="$2"
    (
        set +e
        debug "Removing container $_DB_NAME"
        docker stop "$container_id"
        docker rm "$_DB_NAME"
        docker network rm "${_DB_NAME}"
        rm -vf "$state_tmpdir/${_DB_NAME}.state"
    ) >&2
}
export -f _clean_docker
|
|
|
|
|
|
## get_service_base_image_dir_uid_gid SERVICE DIR
## Print "UID GID" of DIR as found inside SERVICE's base image
## (answer is cached through cached_cmd_on_base_image).
get_service_base_image_dir_uid_gid() {
    local service="$1" dir="$2" uid_gid
    uid_gid=$(cached_cmd_on_base_image "$service" "stat -c '%u %g' '$dir'") || {
        debug "Failed to query '$dir' uid in ${DARKYELLOW}$service${NORMAL} base image."
        return 1
    }
    info "uid and gid from ${DARKYELLOW}$service${NORMAL}:$dir is '$uid_gid'"
    echo "$uid_gid"
}
export -f get_service_base_image_dir_uid_gid
|
|
|
|
|
|
## get_service_type SERVICE
## Print the 'type' declared in the charm metadata of SERVICE's top
## master service (shyaml's default argument makes it "service" when
## unset).  The answer is cached for the rest of the run.
get_service_type() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1"
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    master_target_service="$(get_top_master_service_for_service "$service")" || return 1
    charm=$(get_service_charm "$master_target_service") || return 1
    metadata=$(charm.metadata "$charm") || return 1
    printf "%s" "$metadata" | shyaml get-value type service 2>/dev/null |
        tee "$cache_file"
}
export -f get_service_type
|
|
|
|
## are_files_locked_in_dir DIR
## Succeed if some file under DIR currently appears in /proc/locks.
## Implementation: /proc/locks identifies files as MAJ:MIN:INODE, so we
## hex-format DIR's device number, collect the inodes under DIR, and
## look for an intersection.
## NOTE(review): assumes the device number fits "%04x" (2 hex digits
## for each of major/minor) — verify on hosts with large device ids.
are_files_locked_in_dir() {
    local dir="$1" device hdev ldev
    device=$(stat -c %d "$dir") || {
        err "Can't stat '$dir'."
        return 1
    }
    device=$(printf "%04x" $device)
    hdev=${device:0:2}
    ldev=${device:2:2}
    ## every inode under $dir, wrapped in ':' for exact substring match
    inodes=$(find "$dir" -printf ':%i:\n')

    found=
    while read -r inode; do
        debug "try inode:$inode"
        if [[ "$inodes" == *":$inode:"* ]]; then
            found=1
            break
        fi
    done < <(cat /proc/locks | grep " $hdev:$ldev:" | sed -r "s/^.*$hdev:$ldev:([0-9]+).*$/\1/g")

    [ "$found" ]
}
export -f are_files_locked_in_dir
|
|
|
|
|
|
## set_db_params DOCKER_IP DOCKER_NETWORK
## Once-per-process wrapper around _set_db_params (guarded by the
## DB_PARAMS_LOADED flag).
set_db_params() {
    local docker_ip="$1" docker_network="$2"
    if [ -n "$DB_PARAMS_LOADED" ]; then
        return 0
    fi
    DB_PARAMS_LOADED=1
    _set_db_params "$docker_ip" "$docker_network"
}
export -f set_db_params
|
|
|
|
## Current pid, used to give the ad-hoc database docker a unique name.
export _PID="$$"

## ensure_db_docker_running
## Make sure an ad-hoc database docker named db_${DB_NAME}_${_PID} is
## up and reachable:
##   - a ".state" file in $state_tmpdir caches "network:ip" between
##     calls of the same run;
##   - a ".working" marker guards against recursive invocation;
##   - if the datadir is already locked, adopt the running container of
##     the same compose project instead of starting a new one;
##   - otherwise create a dedicated docker network and run
##     $DOCKER_BASE_IMAGE on it, with the datadir bind-mounted.
## On success DOCKER_NETWORK/DOCKER_IP are propagated via set_db_params.
## Relies on plugin-provided functions: is_db_locked, _set_db_params,
## _set_server_db_params, and variables DB_NAME, DB_DATADIR, ...
ensure_db_docker_running () {
    local _STATE_FILE errlvl project

    _DB_NAME="db_${DB_NAME}_${_PID}"
    _STATE_FILE="$state_tmpdir/${_DB_NAME}.state"
    if [ -e "$_STATE_FILE" ]; then
        IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$(cat "$_STATE_FILE")"

        debug "Re-using previous docker/connection '$DOCKER_IP'."
        set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"

        return 0
    fi

    if [ -e "$state_tmpdir/${_DB_NAME}.working" ]; then
        ## avoid recursive calls.
        if [ -z "$DOCKER_IP" ]; then
            err "Currently figuring up DOCKER_IP, please set it yourself before this call if needed."
            return 1
        else
            debug "ignoring recursive call of 'ensure_db_docker_running'."
        fi
        return 0
    fi

    touch "$state_tmpdir/${_DB_NAME}.working"

    ## remove any stale same-named container from a previous run
    docker rm "$_DB_NAME" 2>/dev/null || true

    host_db_working_dir="$HOST_DATASTORE/${SERVICE_NAME}$DB_DATADIR"

    if is_db_locked; then
        info "Some process is using '$host_db_working_dir'. Trying to find a docker that would do this..."
        found=
        for docker_id in $(docker ps -q); do
            has_volume_mounted=$(
                docker inspect \
                    --format "{{range .Mounts}}{{if eq .Destination \"$DB_DATADIR\"}}{{.Source}}{{end}}{{end}}" \
                    "$docker_id")
            if [ "$has_volume_mounted" == "$host_db_working_dir" ]; then
                info "docker '$docker_id' uses '$has_volume_mounted'."
                project=$(docker inspect "$docker_id" \
                    --format "{{index .Config.Labels \"compose.project\" }}") || continue
                info "docker '$docker_id' is from project '$project' (current project is '$PROJECT_NAME')."
                [ "$project" == "$PROJECT_NAME" ] || continue
                found="$docker_id"
                break
            fi
        done
        if [ -z "$found" ]; then
            err "Please shutdown any other docker using this directory."
            return 1
        fi
        export container_id="$found"
        info "Found docker $docker_id is already running."
    else
        verb "Database is not locked."
        if ! docker_has_image "$DOCKER_BASE_IMAGE"; then
            err "Unexpected missing docker image $DOCKER_BASE_IMAGE."
            return 1
        fi

        _set_server_db_params || return 1
        debug docker network create "$_DB_NAME"
        if ! network_id=$(docker network create "$_DB_NAME"); then
            err "'docker network create $_DB_NAME' failed !"
            _clean_docker "$_DB_NAME" "$container_id"
            rm "$state_tmpdir/${_DB_NAME}.working"
            return 1
        fi
        ## the 'debug' line echoes the exact command run just after
        debug docker run -d \
            --name "$_DB_NAME" \
            "${server_docker_opts[@]}" \
            --network "$_DB_NAME" \
            -v "$host_db_working_dir:$DB_DATADIR" \
            "$DOCKER_BASE_IMAGE"
        if ! container_id=$(
            docker run -d \
                --name "$_DB_NAME" \
                "${server_docker_opts[@]}" \
                --network "$_DB_NAME" \
                -v "$host_db_working_dir:$DB_DATADIR" \
                "$DOCKER_BASE_IMAGE"
            ); then
            err "'docker run' failed !"
            _clean_docker "$_DB_NAME" "$container_id"
            rm "$state_tmpdir/${_DB_NAME}.working"
            return 1
        fi
        trap_add EXIT,ERR "_clean_docker \"$_DB_NAME\" \"$container_id\""
    fi

    if docker_ip=$(wait_for_docker_ip "$container_id"); then
        IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$docker_ip"
        echo "$docker_ip" > "$_STATE_FILE"
        debug "written '$_STATE_FILE'"
        rm "$state_tmpdir/${_DB_NAME}.working"
        set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"
        return 0
    else
        errlvl="$?"
        err "Db not found (errlvl: $errlvl). Tail of docker logs follows:"
        docker logs --tail=5 "$container_id" 2>&1 | prefix " | " >&2
        rm "$state_tmpdir/${_DB_NAME}.working"
        return "$errlvl"
    fi
}
export -f ensure_db_docker_running
|
|
|
|
|
|
## Require to set $db_docker_opts if needed, and $DB_PASSFILE
##
## _dcmd COMMAND ARGS...
## Run a database client COMMAND in a throw-away container of
## $DOCKER_BASE_IMAGE, bind-mounting the password file when the
## HOST/LOCAL/CLIENT_DB_PASSFILE triplet is configured.
_dcmd() {
    local docker_opts command="$1"
    shift

    debug "Db> $command $@"

    if [ "$HOST_DB_PASSFILE" -a -f "$LOCAL_DB_PASSFILE" -a "$CLIENT_DB_PASSFILE" ]; then
        verb "Found and using '$HOST_DB_PASSFILE' as '$CLIENT_DB_PASSFILE'."
        docker_opts=("${db_docker_opts[@]}" "-v" "$HOST_DB_PASSFILE:$CLIENT_DB_PASSFILE")
    else
        docker_opts=("${db_docker_opts[@]}")
    fi

    ## XXXX was here: actualy, we need only connection between this version and the client version
    ## (the 'debug' call echoes the exact command line run just after)
    debug docker run -i --rm \
        "${docker_opts[@]}" \
        --entrypoint "$command" "$DOCKER_BASE_IMAGE" "${db_cmd_opts[@]}" "$@"
    docker run -i --rm \
        "${docker_opts[@]}" \
        --entrypoint "$command" "$DOCKER_BASE_IMAGE" "${db_cmd_opts[@]}" "$@"
}
export -f _dcmd
|
|
|
|
|
|
## Executes code through db
## dcmd COMMAND ARGS...
## Validate the per-database plugin contract ($DB_NAME, $DB_DATADIR,
## $_PID plus the is_db_locked/_set_db_params/ddb functions), ensure
## the ad-hoc database docker is running, then run COMMAND via _dcmd.
dcmd() {
    local fun
    [ "$DB_NAME" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_NAME."
    [ "$DB_DATADIR" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_DATADIR."
    # [ "$DB_PASSFILE" ] || print_syntax_error "$FUNCNAME: You must provide \$DB_PASSFILE."
    [ "$_PID" ] || print_syntax_error "$FUNCNAME: You must provide \$_PID."
    for fun in is_db_locked _set_db_params ddb; do
        [ "$(type -t "$fun")" == "function" ] ||
            print_syntax_error "$FUNCNAME: You must provide function '$fun'."
    done
    ## </dev/null: don't let the db bootstrap eat COMMAND's stdin
    ensure_db_docker_running </dev/null || return 1

    _dcmd "$@"
}
export -f dcmd
|
|
|
|
|
|
## get_docker_ips CONTAINER
## Print one "network:ip" line per network CONTAINER is attached to.
## Falls back to the legacy flat .IPAddress field (network name
## "default") on engines without .NetworkSettings.Networks.
get_docker_ips() {
    local name="$1" ip format network_id
    if ! docker inspect --format='{{ .NetworkSettings.Networks }}' "$name" >/dev/null 2>&1; then
        echo "default:$(docker inspect --format='{{ .NetworkSettings.IPAddress }}' "$name" 2>/dev/null)"
    else
        format='{{range $name, $conf := .NetworkSettings.Networks}}{{$name}}{{"\x00"}}{{$conf.IPAddress}}{{"\x00"}}{{end}}'
        while read-0 network_id ip; do
            printf "%s:%s\n" "$network_id" "$ip"
        done < <(docker inspect --format="$format" "$name")
    fi
}
export -f get_docker_ips
|
|
|
|
|
|
## get_docker_ip CONTAINER
## Alias of get_docker_ips (kept for backward compatibility).
get_docker_ip() {
    local name="$1"
    get_docker_ips "$name"
}
export -f get_docker_ip
|
|
|
|
|
|
## wait_docker_ip NAME [TIMEOUT]
## Poll every 0.5s until NAME's container reports an IP, then print the
## "network:ip" line; fail after TIMEOUT seconds (default 15).
wait_docker_ip() {
    local name="$1" timeout="${2:-15}" timeout_count=0 docker_ip= \
          start elapsed old_elapsed=
    ## ('start'/'elapsed'/'old_elapsed' previously leaked as globals,
    ## and 'old_elapsed' was read before ever being set)
    start=$SECONDS
    while [ -z "$docker_ip" ]; do
        sleep 0.5
        docker_ip=$(get_docker_ip "$name") && break
        elapsed=$((SECONDS - start))
        if ((elapsed > timeout)); then
            err "${RED}timeout error${NORMAL}(${timeout}s):" \
                "Could not find '$name' docker container's IP."
            return 1
        fi
        ## log at most once per elapsed second
        [ "$elapsed" == "$old_elapsed" ] ||
            verb "Waiting for docker $name... ($elapsed/$timeout)"
        old_elapsed="$elapsed"
    done
    verb "Found docker $name network and IP: $docker_ip"
    echo "$docker_ip"
}
export -f wait_docker_ip
|
|
|
|
|
|
## wait_for_tcp_port NETWORK HOST:PORT [TIMEOUT]
## From a helper container attached to NETWORK, poll HOST:PORT with a
## bash /dev/tcp probe until connect succeeds; fail after TIMEOUT
## seconds (default 60).  Escaped '\$' below are expanded inside the
## helper container, unescaped '$' on this host.
wait_for_tcp_port() {
    local network=$1 host_port=$2 timeout=${3:-60}
    verb "Trying to connect to $host_port"
    bash_image=${DEFAULT_BASH_IMAGE:-docker.0k.io/bash}
    #echo docker run --rm -i --network "$network" "$bash_image" >&2
    docker run --rm -i --network "$network" "$bash_image" <<EOF
start=\$SECONDS
while true; do
    timeout 1 bash -c "</dev/tcp/${host_port/://}" >/dev/null 2>&1 && break
    sleep 0.2
    if [ "\$((SECONDS - start))" -gt "$timeout" ]; then
        exit 1
    fi
done
exit 0
EOF
    if [ "$?" != 0 ]; then
        err "${RED}timeout error${NORMAL}(${timeout}s):"\
            "Could not connect to $host_port."
        return 1
    fi
    return 0
}
export -f wait_for_tcp_port
|
|
|
|
|
|
## Warning: requires a ``ddb`` matching current database to be checked
## wait_for_docker_ip NAME
## Wait for NAME's IP, verify the first exposed TCP port answers, then
## poll the database itself ($check_command piped to ddb) until it
## accepts connections ("database system is starting up" errors are
## retried).  Prints "network:ip" on success; returns 17 on TCP
## failure, 18 on hard db connection failure.
wait_for_docker_ip() {
    local name=$1 DOCKER_IP= DOCKER_NETWORK= docker_ips= docker_ip= elapsed timeout=10
    docker_ip=$(wait_docker_ip "$name" 5) || return 1
    IFS=: read DOCKER_NETWORK DOCKER_IP <<<"$docker_ip"
    if ! str_is_ipv4 "$DOCKER_IP"; then
        err "internal 'wait_docker_ip' did not return a valid IP. Returned IP is '$DOCKER_IP'."
        return 1
    fi
    set_db_params "$DOCKER_IP" "$DOCKER_NETWORK"
    ## NOTE(review): iterates ports of $container_id (a caller-scope
    ## variable, not $name) — confirm this coupling is intended.
    while read-0 port; do
        IFS="/" read port type <<<"$port"
        [ "$type" == "tcp" ] || continue
        wait_for_tcp_port "$DOCKER_NETWORK" "$DOCKER_IP:${port}" || return 17
        info "Host/Port $DOCKER_IP:${port} checked ${GREEN}open${NORMAL}."
        ## XXXvlab: what to do with more than one port ?
        break
    done < <(image_exposed_ports_0 "$container_id")

    ## Checking direct connection
    timeout=120
    start=$SECONDS
    while true; do
        if err=$(echo "$check_command" | ddb 2>&1 >/dev/null); then
            break
        fi
        if ! [[ "$err" == *"the database system is starting up" ]]; then
            err "${RED}db connection error${NORMAL}:" \
                "Could not connect to db on $DOCKER_IP container's IP."
            echo " Note: IP up, TCP ports is(are) open" >&2
            if [ "$err" ]; then
                echo " Error:" >&2
                printf "%s\n" "$err" | prefix " ${RED}!${NORMAL} " >&2
            fi
            return 18
        fi
        debug "Got 'database system is starting up' error."
        elapsed=$((SECONDS - start))
        if ((elapsed > timeout)); then
            err "${RED}db connection error${NORMAL}:"\
                "Could not connect to db on $DOCKER_IP" \
                "container's IP. (IP up, TCP ports is(are) open, sql answer after ${timeout}s)"
            return 1
        fi
        sleep 0.2
    done
    echo "${DOCKER_NETWORK}:${DOCKER_IP}"
    return 0
}
export -f wait_for_docker_ip
|
|
|
|
|
|
## docker_add_host_declaration SRC_DOCKER DOMAIN DST_DOCKER
## Add (or refresh) an /etc/hosts entry inside SRC_DOCKER mapping
## DOMAIN to DST_DOCKER's IP.
docker_add_host_declaration() {
    local src_docker=$1 domain=$2 dst_docker=$3 dst_docker_ip= dst_docker_network
    ## return (not exit): keep failure handling consistent with the
    ## other wait_docker_ip callers in this file.
    dst_docker_ip=$(wait_docker_ip "$dst_docker") || return 1

    ## wait_docker_ip prints "network:ip" (network FIRST) — the
    ## previous read order put the network name into dst_docker_ip,
    ## writing "NETWORK domain" into /etc/hosts instead of the IP.
    IFS=: read dst_docker_network dst_docker_ip <<<"$dst_docker_ip"

    docker exec -i "$src_docker" bash <<EOF
if cat /etc/hosts | grep -E "^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+$domain\$" > /dev/null 2>&1; then
    sed -ri "s/^[0-9]+\.[0-9]+\.[0-9]+\.[0-9]+\s+$domain\$/$dst_docker_ip $domain/g" /etc/hosts
else
    echo "$dst_docker_ip $domain" >> /etc/hosts
fi
EOF
}
export -f docker_add_host_declaration
|
|
|
|
|
|
## get_running_containers_for_service SERVICE [PROJECT]
## List ids (one per line) of running containers labeled with the given
## compose project (defaults to $PROJECT_NAME) and SERVICE as their
## master-service.
get_running_containers_for_service() {
    local service="$1" project="${2:-$PROJECT_NAME}"

    if [ -z "$project" ]; then
        err "No project name was defined yet."
        return 1
    fi

    docker ps \
        --filter label="compose.project=$project" \
        --filter label="compose.master-service=$service" \
        --format="{{.ID}}"
}
export -f get_running_containers_for_service
|
|
|
|
|
|
## get_container_network_ips CONTAINER
## Print NUL-separated NETWORK, IP pairs for each attached network.
get_container_network_ips() {
    local container="$1"
    docker inspect "$container" \
        --format='{{range $key, $val :=.NetworkSettings.Networks}}{{$key}}{{"\x00"}}{{$val.IPAddress}}{{"\x00"}}{{end}}'
}
export -f get_container_network_ips
|
|
|
|
|
|
## get_container_network_ip CONTAINER
## Print only the first NETWORK\0IP\0 pair reported for CONTAINER.
get_container_network_ip() {
    local container="$1"
    while read-0 network ip; do
        printf "%s\0" "$network" "$ip"
        break
    done < <(get_container_network_ips "$container")
}
export -f get_container_network_ip
|
|
|
|
|
|
##
|
|
## Internal Process
|
|
##
|
|
|
|
|
|
## Emit (on stdout) the docker-compose 'links' YAML mixin implied by the
## relations of service $1, session-cached.
##
## For each relation, a link is created between the two TOP master
## services, oriented according to the relation's 'tech-dep' attribute:
## 'reversed' links target->source, True/true links source->target.
get_docker_compose_links() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          deps master_service master_target_service _relation_name \
          target_service _relation_config tech_dep
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    master_service=$(get_top_master_service_for_service "$service") || return 1
    deps=()
    while read-0 _relation_name target_service _relation_config tech_dep; do
        master_target_service="$(get_top_master_service_for_service "$target_service")" || return 1
        ## Both endpoints live in the same container: no link needed.
        [ "$master_service" == "$master_target_service" ] && continue
        ## NOTE(review): 'type' is not in the 'local' list and leaks to
        ## the caller's scope.
        type="$(get_service_type "$target_service")" || return 1
        ## Stub and run-once services never run as linkable containers.
        [ "$type" == "stub" ] && continue
        [ "$type" == "run-once" ] && continue
        if [ "$tech_dep" == "reversed" ]; then
            deps+=("$(echo -en "$master_target_service:\n links:\n - $master_service")")
        elif [[ "$tech_dep" =~ ^(True|true)$ ]]; then
            deps+=("$(echo -en "$master_service:\n links:\n - $master_target_service")")
        fi
        ## XXXvlab: an attempt to add depends_on, but this doesn't work well actually
        ## as there's a circular dependency issue. We don't really want the full feature
        ## of depends_on, but just to add it as targets when doing an 'up'
        # deps+=("$(echo -en "$master_service:\n depends_on:\n - $master_target_service")")
    done < <(get_service_relations "$service")
    merge_yaml_str "${deps[@]}" | tee "$cache_file" || return 1
    ## 'tee' can succeed even when merge_yaml_str failed: check the first
    ## pipeline stage's status and discard the then-invalid cache entry.
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        rm "$cache_file"
        err "Failed to merge YAML from all ${WHITE}links${NORMAL} dependencies."
        return 1
    fi
}
|
|
|
|
|
|
## Extract the raw 'docker-compose' override section of service $1's
## compose definition, re-keyed under the service's top master service.
## Prints YAML (possibly empty) on stdout; session-cached.
_get_docker_compose_opts() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          compose_def master_service docker_compose_opts
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    compose_def="$(get_compose_service_def "$service")" || return 1
    master_service="$(get_top_master_service_for_service "$service")"

    ## A missing 'docker-compose' key is not an error: the 'if' body is
    ## skipped and 'tee' records an empty cache file.
    if docker_compose_opts=$(echo "$compose_def" | shyaml get-value -y "docker-compose" 2>/dev/null); then
        yaml_key_val_str "$master_service" "$docker_compose_opts"
    fi | tee "$cache_file"
    ## Discard the cache entry if yaml_key_val_str (first stage) failed.
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        rm "$cache_file"
        return 1
    fi
}
|
|
|
|
|
|
##
|
|
## By Reading the metadata.yml, we create a docker-compose.yml mixin.
|
|
## Some metadata.yml (of subordinates) will indeed modify other
|
|
## services than themselves.
|
|
## Build the complete docker-compose YAML mixin contributed by service
## $1: base labels + relation links + charm metadata mixin + raw
## 'docker-compose' overrides, merged together.  Session-cached.
_get_docker_compose_service_mixin() {
    ## Fix: 'links_yaml' was listed twice in the 'local' declaration, and
    ## 'type', 'master_service' and 'charm_yaml' leaked to the caller's
    ## scope; all are properly local now.
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          links_yaml base_mixin docker_compose_options \
          charm charm_part type master_service charm_yaml
    if [ -z "$service" ]; then
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    fi

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    type=$(get_service_type "$service") || return 1
    ## Stub services contribute nothing to the compose file.
    [ "$type" == "stub" ] && return 0

    master_service=$(get_top_master_service_for_service "$service") || {
        err "Failed to get top master service for service $DARKYELLOW$service$NORMAL"
        return 1
    }

    ## The compose part

    base_mixin="$master_service:
  labels:
  - compose.service=$service
  - compose.master-service=${master_service}
  - compose.project=$(get_default_project_name)"
    links_yaml=$(get_docker_compose_links "$service") || return 1
    docker_compose_options=$(_get_docker_compose_opts "$service") || return 1

    ## the charm part

    charm_part=$(get_docker_compose_mixin_from_metadata "$service") || return 1

    ## Merge results
    if [ "$charm_part" ]; then
        charm_yaml="$(yaml_key_val_str "$master_service" "$charm_part")" || return 1
        merge_yaml_str "$base_mixin" "$links_yaml" "$charm_yaml" "$docker_compose_options" || return 1
    else
        merge_yaml_str "$base_mixin" "$links_yaml" "$docker_compose_options" || return 1
    fi | tee "$cache_file"
    ## Discard the cache entry if the merge (first stage) failed.
    if [ "${PIPESTATUS[0]}" != 0 ]; then
        err "Failed to constitute the base YAML for service '${DARKYELLOW}$service${NORMAL}'"
        rm "$cache_file"
        return 1
    fi

}
export -f _get_docker_compose_service_mixin
|
|
|
|
|
|
##
|
|
## Get full `docker-compose.yml` format for all listed services (and
|
|
## their deps)
|
|
##
|
|
|
|
## @export
|
|
## @cache: !system !nofail +stdout
|
|
## Compile and print the full 'docker-compose.yml' content for all
## services given as arguments, including their dependencies.
##
## Side effect: exports $_CURRENT_DOCKER_COMPOSE with the path of the
## compiled file (which doubles as the session cache file).
get_docker_compose () {
    local cache_file="$state_tmpdir/$FUNCNAME.cache.$(echo "$*" | md5_compat)" \
          entries services service start docker_compose_services
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    ##
    ## Adding sub services configurations
    ##

    ## maps service name -> its YAML mixin; doubles as a "seen" set.
    declare -A entries
    start_compilation=$SECONDS
    debug "Compiling 'docker-compose.yml' base for ${DARKYELLOW}$*$NORMAL..."
    for target_service in "$@"; do
        start=$SECONDS
        services=($(get_ordered_service_dependencies "$target_service")) || {
            err "Failed to get dependencies for $DARKYELLOW$target_service$NORMAL"
            return 1
        }

        if [ "$DEBUG" ]; then
            debug " $DARKYELLOW$target_service$NORMAL deps:$DARKYELLOW" \
                  "${services[@]::$((${#services[@]} - 1))}" \
                  "$NORMAL$GRAY(in $((SECONDS - start))s)$NORMAL"
        fi
        for service in "${services[@]}"; do

            if [ "${entries[$service]}" ]; then
                ## Prevent double inclusion of same service if this
                ## service is deps of two or more of your
                ## requirements.
                continue
            fi

            ## mark the service as "loaded" as well as it's containers
            ## if this is a subordinate service
            start_service=$SECONDS
            entries[$service]=$(_get_docker_compose_service_mixin "$service") || {
                err "Failed to get service mixin for $DARKYELLOW$service$NORMAL"
                return 1
            }
            debug " Applied $DARKYELLOW$service$NORMAL charm metadata mixins $GRAY(in $((SECONDS - start_service))s)$NORMAL"
        done
        debug " ..finished all mixins for $DARKYELLOW$target_service$NORMAL $GRAY(in $((SECONDS - start))s)$NORMAL"
    done

    docker_compose_services=$(merge_yaml_str "${entries[@]}") || {
        err "Failed to merge YAML services entries together."
        return 1
    }

    ## NOTE(review): 'base_v2', 'target_service', 'start_compilation' and
    ## 'start_service' are not declared local and leak to the caller.
    base_v2="version: '2.1'"
    merge_yaml_str "$(yaml_key_val_str "services" "$docker_compose_services")" \
                   "$base_v2" > "$cache_file" || return 1

    export _CURRENT_DOCKER_COMPOSE="$cache_file"
    cat "$_CURRENT_DOCKER_COMPOSE"
    debug " ..compilation of base 'docker-compose.yml' done $GRAY(in $((SECONDS - start_compilation))s)$NORMAL" || true
    # debug " ** ${WHITE}docker-compose.yml${NORMAL}:"
    # debug "$_current_docker_compose"
}
export -f get_docker_compose
|
|
|
|
|
|
## Resolve the raw compose definition of service $1 from the full
## docker-compose content $2, inferring 'charm: <service>' when the
## definition does not name a charm.  Statically cached in $CACHEDIR.
_get_compose_service_def_cached () {
    local service="$1" docker_compose="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: STATIC cache hit"
        ## refresh mtime — presumably to support mtime-based cache cleanup.
        cat "$cache_file" &&
            touch "$cache_file" || return 1
        return 0
    fi
    ## NOTE(review): 'value' is not declared local and leaks to the caller.
    value=$(echo "$docker_compose" | shyaml get-value "${service//./\\.}" 2>/dev/null)
    ## shyaml prints 'None' for an empty mapping entry: normalize it.
    [ "$value" == None ] && value=""
    if ! echo "$value" | shyaml get-value "charm" >/dev/null 2>&1; then
        ## No explicit 'charm:' key: fall back to a charm named like the
        ## service itself, when such a charm exists.
        if charm.exists "$service"; then
            value=$(merge_yaml <(echo "charm: $service") <(echo "$value")) || {
                err "Can't merge YAML infered 'charm: $service' with base ${DARKYELLOW}$service${NORMAL} YAML definition."
                return 1
            }
        else
            err "No ${WHITE}charm${NORMAL} value for service $DARKYELLOW$service$NORMAL" \
                "in compose, nor same name charm found."
            return 1
        fi
    fi
    echo "$value" | tee "$cache_file" || return 1
    # if [ "${PIPESTATUS[0]}" != 0 ]; then
    #     rm "$cache_file"
    #     return 1
    # fi
    return 0
    # if [ "${PIPESTATUS[0]}" != 0 -o \! -s "$cache_file" ]; then
    #     rm "$cache_file"
    #     err "PAS OK $service: $value"
    #     return 1
    # fi
}
export -f _get_compose_service_def_cached
|
|
|
|
|
|
## XXXvlab: a lot to be done to cache the results
|
|
## Print the compose definition of service $1, with the charm's
## 'default-options' (from its metadata.yml) merged in under 'options'
## below the service's own options.  Session-cached.
get_compose_service_def () {
    local service="$1" docker_compose cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          result
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: SESSION cache hit"
        cat "$cache_file" || return 1
        return 0
    fi

    [ -z "$service" ] && print_syntax_error "Missing service as first argument."
    docker_compose=$(get_compose_yml_content) || return 1
    result=$(_get_compose_service_def_cached "$service" "$docker_compose") || return 1
    ## NOTE(review): 'charm', 'metadata' and 'default_options' are not
    ## declared local and leak to the caller's scope.
    charm=$(echo "$result" | shyaml get-value charm 2>/dev/null) || return 1
    metadata=$(charm.metadata "$charm") || return 1
    ## Merge charm-provided defaults below the service's own options.
    if default_options=$(printf "%s" "$metadata" | shyaml -y -q get-value default-options); then
        default_options=$(yaml_key_val_str "options" "$default_options") || return 1
        result=$(merge_yaml_str "$default_options" "$result") || return 1
    fi
    echo "$result" | tee "$cache_file" || return 1
}
export -f get_compose_service_def
|
|
|
|
|
|
## Extract the 'charm' key from the service definition $2 ($1 is only
## used for the error message).  Statically cached in $CACHEDIR.
_get_service_charm_cached () {
    local service="$1" service_def="$2" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit $1"
        ## refresh mtime — presumably to support mtime-based cache cleanup.
        cat "$cache_file" &&
            touch "$cache_file" || return 1
        return 0
    fi
    ## NOTE(review): 'charm' is not declared local and leaks to the caller.
    charm=$(echo "$service_def" | shyaml get-value charm 2>/dev/null)
    if [ -z "$charm" ]; then
        err "Missing ${WHITE}charm${NORMAL} value in service $DARKYELLOW$service$NORMAL definition."
        return 1
    fi
    echo "$charm" | tee "$cache_file" || return 1
}
export -f _get_service_charm_cached
|
|
|
|
|
|
## Resolve the charm name backing service $1 from its compose
## definition.
get_service_charm () {
    local service="$1"
    [ -n "$service" ] || {
        echo ${FUNCNAME[@]} >&2
        print_syntax_error "$FUNCNAME: Please specify a service as first argument."
        return 1
    }
    ## NOTE: 'service_def' is deliberately not 'local' and stays visible
    ## to the caller.
    service_def=$(get_compose_service_def "$service") || return 1
    _get_service_charm_cached "$service" "$service_def"
}
export -f get_service_charm
|
|
|
|
|
|
## built above the docker-compose abstraction, so it relies on the
|
|
## full docker-compose.yml to be already built.
|
|
## Fetch the compiled docker-compose definition of service $1.
##
## Built above the docker-compose abstraction: requires the full
## compiled file, i.e. $_CURRENT_DOCKER_COMPOSE must already be set.
get_service_def () {
    local service="$1" def
    if [ -z "$_CURRENT_DOCKER_COMPOSE" ]; then
        err "${FUNCNAME[0]} is meant to be called after"\
            "\$_CURRENT_DOCKER_COMPOSE has been calculated."
        echo " Called by:" >&2
        printf " - %s\n" "${FUNCNAME[@]:1}" >&2
        return 1
    fi

    ## Dots in service names must be escaped for shyaml's key syntax.
    def=$(shyaml get-value "services.${service//./\\.}" 2>/dev/null \
              < "$_CURRENT_DOCKER_COMPOSE")
    if [ -n "$def" ]; then
        echo "$def"
        return 0
    fi
    err "No definition for service $DARKYELLOW$service$NORMAL in compiled 'docker-compose.yml'."
    return 1
}
export -f get_service_def
|
|
|
|
## Compute a stable content hash for a docker build directory
## (session-cached): the md5 of the sorted md5sums of every file under
## the directory.  The directory must contain a 'Dockerfile'.
get_build_hash() {
    local dir="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$1")" digest

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    ## Refuse directories that are not docker build contexts.
    [ -e "$dir/Dockerfile" ] || {
        err "No 'Dockerfile' found in '$dir'."
        return 1
    }

    ## 'env -i' keeps the environment out of the run; sorting makes the
    ## aggregate hash independent of directory traversal order.
    digest=$(set -o pipefail
             cd "$dir"
             env -i find "." -type f -exec md5sum {} \; |
                 sort | md5sum | awk '{print $1}') || {
        err "Failed to get hash for '$dir'."
        return 1
    }
    printf "%s" "$digest" | tee "$cache_file"
    return $?
}
export -f get_build_hash
|
|
|
|
### Query/Get cached image from registry
|
|
##
|
|
## Returns on stdout the name of the image if found, or an empty string if not
|
|
## Query the configured registry for the cached charm image
## 'cache/charm/$1:$2' (service $3 is used for messages only).
##
## On stdout: the local image name when found, nothing when absent.
## Returns non-zero on unexpected pull failures.
cache:image:registry:get() {
    local charm="$1" hash="$2" service="$3"
    local charm_image_name="cache/charm/$charm"
    local charm_image="$charm_image_name:$hash"

    Elt "pulling ${DARKPINK}$charm${NORMAL} image from $COMPOSE_DOCKER_REGISTRY" >&2
    if out=$(docker pull "$COMPOSE_DOCKER_REGISTRY/$charm_image" 2>&1); then
        docker tag "$COMPOSE_DOCKER_REGISTRY/$charm_image" "$charm_image" || {
            err "Failed set image '$COMPOSE_DOCKER_REGISTRY/$charm_image' as '$charm_image'" \
                "for ${DARKYELLOW}$service${NORMAL}."
            return 1
        }
        print_info "found" >&2
        print_status success >&2
        Feed >&2
        ## Fix: this used to pipe through 'tee "$cache_file"', but no
        ## 'cache_file' is defined in this function (it either leaked in
        ## from the caller's scope or, empty, made 'tee' fail).  Callers
        ## handle their own caching; just print the image name.
        printf "%s" "$charm_image"
        return 0
    fi
    ## Any pull failure other than "image absent" is a hard error.
    if [[ "$out" != *"manifest unknown"* ]] && [[ "$out" != *"not found"* ]]; then
        print_status failure >&2
        Feed >&2
        err "Failed to pull image '$COMPOSE_DOCKER_REGISTRY/$charm_image'" \
            "for ${DARKYELLOW}$service${NORMAL}:"
        e "$out"$'\n' | prefix " ${GRAY}|${NORMAL} " >&2
        return 1
    fi
    print_info "not found" >&2
    ## NOTE(review): '$type_method' is never set in this function; with it
    ## unset the 'else' branch below always runs — confirm intent.
    if test "$type_method" = "long"; then
        __status="[${NOOP}ABSENT${NORMAL}]"
    else
        echo -n "${NOOP}"
        shift; shift;
        echo -n "$*${NORMAL}"
    fi >&2
    Feed >&2
}
export -f cache:image:registry:get
|
|
|
|
### Store cached image on registry
|
|
##
|
|
## Returns nothing
|
|
## Push the locally cached charm image 'cache/charm/$1:$2' to the
## configured registry (service $3 is unused here beyond the signature).
## Silently does nothing unless both $COMPOSE_DOCKER_REGISTRY and
## $COMPOSE_PUSH_TO_REGISTRY are set.
cache:image:registry:put() {
    if [ -n "$COMPOSE_DOCKER_REGISTRY" ] && [ -n "$COMPOSE_PUSH_TO_REGISTRY" ]; then
        local charm="$1" hash="$2" service="$3"
        local charm_image_name="cache/charm/$charm"
        local charm_image="$charm_image_name:$hash"

        ## Tag with the registry prefix then push; 'Wrap' runs the
        ## heredoc script and the function fails if it fails.
        Wrap -d "pushing ${DARKPINK}$charm${NORMAL} image to $COMPOSE_DOCKER_REGISTRY" <<EOF || return 1
docker tag "$charm_image" "$COMPOSE_DOCKER_REGISTRY/$charm_image" &&
docker push "$COMPOSE_DOCKER_REGISTRY/$charm_image"
EOF
    fi >&2
}
export -f cache:image:registry:put
|
|
|
|
|
|
### Produce docker cached charm image 'cache/charm/$charm:$hash'
|
|
##
|
|
## Either by fetching it from a registry or by building it from a
|
|
## Dockerfile.
|
|
## Produce the cached charm image 'cache/charm/$3:$4' for service $5,
## either by tagging a fetched upstream image ($1 == fetch, $2 is the
## image name) or by building a docker context ($1 == build, $2 is the
## build directory).
##
## NOTE(review): relies on caller-scope state: $COMPOSE_ACTION, $action,
## $action_opts, $DC_MATCH_MULTI, $DC_MATCH_SINGLE; 'build_opts',
## 'has_named_image', 'out', 'arg', 'val' and 'value' leak back to the
## caller; some branches call 'exit' and terminate the whole program —
## presumably intended in the CLI context, confirm.
cache:image:produce() {
    local type="$1" src="$2" charm="$3" hash="$4" service="$5"
    local charm_image_name="cache/charm/$charm"
    local charm_image="$charm_image_name:$hash"

    case "$type" in
        fetch)
            local specified_image="$src"
            ## will not pull upstream image if already present locally
            if ! docker_has_image "${specified_image}"; then
                if ! out=$(docker pull "${specified_image}" 2>&1); then
                    err "Failed to pull image '$specified_image' for ${DARKYELLOW}$service${NORMAL}:"
                    echo "$out" | prefix " | " >&2
                    return 1
                fi
            fi

            # specified_image_id=$(docker_image_id "$specified_image") || return 1
            # charm_image_id=
            # if docker_has_image "${image_dst}"; then
            #     charm_image_id=$(docker_image_id "${image_dst}") || return 1
            # fi
            # if [ "$specified_image_id" != "$charm_image_id" ]; then
            docker tag "$specified_image" "${charm_image}" || return 1
            # fi
            ;;
        build)
            local service_build="$src"
            build_opts=()
            ## When the user action is 'build', forward its command-line
            ## options to 'docker build'.
            if [ "$COMPOSE_ACTION" == "build" ]; then
                while read-0 arg; do
                    case "$arg" in
                        -t|--tag)
                            ## XXXvlab: doesn't seem to be actually a valid option
                            if [ -n "$COMPOSE_PUSH_TO_REGISTRY" ]; then
                                err "You can't use -t|--tag option when pushing to a registry."
                                exit 1
                            fi
                            has_named_image=true
                            read-0 val ## should always be okay because already checked
                            build_opts+=("$arg" "$val")
                            ;;
                        --help|-h)
                            docker-compose "$action" --help |
                                filter_docker_compose_help_message >&2
                            exit 0
                            ;;
                        --*|-*)
                            if str_pattern_matches "$arg" $DC_MATCH_MULTI; then
                                read-0 value
                                build_opts+=("$arg" "$value")
                                shift
                            elif str_pattern_matches "$arg" $DC_MATCH_SINGLE; then
                                build_opts+=("$arg")
                            else
                                err "Unexpected error while parsing a second time the build arguments."
                            fi
                            ;;
                        *)
                            ## Already parsed
                            build_opts+=("$arg")
                            ;;
                    esac
                done < <(cla.normalize "${action_opts[@]}")
            fi
            ## Default tag: the cache image name, unless '-t' was given.
            if [ -z "$has_named_image" ]; then
                build_opts+=(-t "${charm_image}")
            fi

            Wrap -v -d "Building ${DARKPINK}$charm${NORMAL}:$hash image" -- \
                 docker build "$service_build" -t "${charm_image}" "${build_opts[@]}" >&2 || {
                err "Failed to build image '${charm_image}' for ${DARKYELLOW}$service${NORMAL}."
                return 1
            }
            if [ -n "$has_named_image" ]; then
                exit 0
            fi
            ;;
        *)
            err "Unknown type '$type'."
            return 1
            ;;
    esac
}
export -f cache:image:produce
|
|
|
|
## Ensure the docker image for service $1 is available locally and print
## its name.  For master services, the 'image' or 'build' entry of the
## compiled compose definition is resolved to a cache image
## 'cache/charm/<charm>:<hash>' (rewriting $_CURRENT_DOCKER_COMPOSE
## accordingly); subordinate services inherit their master's image.
## Session-cached.
service_ensure_image_ready() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          master_service service_def service_image service_build service_dockerfile image \
          specified_image specified_image_id charm_image_name hash \
          service_quoted

    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    if [ -z "$_CURRENT_DOCKER_COMPOSE" ]; then
        err "${FUNCNAME[0]} is meant to be called after"\
            "\$_CURRENT_DOCKER_COMPOSE has been calculated."
        echo " Called by:" >&2
        printf " - %s\n" "${FUNCNAME[@]:1}" >&2
        return 1
    fi

    master_service="$(get_top_master_service_for_service "$service")" || {
        err "Could not compute master service for service $DARKYELLOW$service$NORMAL."
        return 1
    }
    ## Subordinate services use the image of their master service.
    if [ "$master_service" != "$service" ]; then
        image=$(service_ensure_image_ready "$master_service") || return 1
        printf "%s" "$image" | tee "$cache_file"
        return $?
    fi

    ## check if \$_CURRENT_DOCKER_COMPOSE's service def is already correctly setup
    local charm="$(get_service_charm "$service")" || return 1
    local charm_image_name="cache/charm/$charm" || return 1
    local service_def="$(get_service_def "$service")" || {
        err "Could not get docker-compose service definition for $DARKYELLOW$service$NORMAL."
        return 1
    }
    local service_quoted=${service//./\\.}

    if specified_image=$(echo "$service_def" | shyaml get-value image 2>/dev/null); then
        ## Fix: prefix matching requires '[[ ... == pattern* ]]'; with
        ## single brackets the unquoted '*' underwent pathname expansion
        ## and the test was a literal comparison that never matched.
        if [[ "$specified_image" == "$charm_image_name"* ]]; then
            ## Assume we already did the change
            printf "%s" "$specified_image" | tee "$cache_file"
            return 0
        fi
        if [[ "$specified_image" == "${COMPOSE_DOCKER_REGISTRY}/"* ]]; then
            if ! docker_has_image "${specified_image}"; then
                Wrap "${wrap_opts[@]}" \
                     -v -d "pulling ${DARKPINK}$charm${NORMAL}'s specified image from $COMPOSE_DOCKER_REGISTRY" -- \
                     docker pull "${specified_image}" >&2 || return 1
            else
                if [ -n "$DEBUG" ]; then
                    Elt "using local ${DARKPINK}$charm${NORMAL}'s specified image from $COMPOSE_DOCKER_REGISTRY" >&2
                    print_status noop >&2
                    Feed >&2
                fi
            fi
            ## Already on the cache server
            printf "%s" "$specified_image" | tee "$cache_file"
            return 0
        fi
        src="$specified_image"
        ## key the cache image on the upstream image name
        hash=$(echo "$specified_image" | md5sum | cut -f 1 -d " ") || return 1
        type=fetch

        ## replace image by charm image
        yq -i ".services.[\"${service_quoted}\"].image = \"${charm_image_name}:${hash}\"" \
           "$_CURRENT_DOCKER_COMPOSE" || return 1
    else

        if ! src=$(echo "$service_def" | shyaml get-value build 2>/dev/null); then
            err "Service $DARKYELLOW$service$NORMAL has no ${WHITE}image${NORMAL} nor ${WHITE}build${NORMAL} parameter."
            echo "$service_def" >&2
            return 1
        fi

        ## According to https://stackoverflow.com/questions/32230577 , if there's a build,
        ## then the built image will get name ${project}_${service}
        hash=$(get_build_hash "$src") || return 1
        type=build
        ## delete build key from service_def and add image to charm_image_name
        yq -i "del(.services.[\"${service_quoted}\"].build) |
               .services.[\"${service_quoted}\"].image = \"${charm_image_name}:${hash}\"" \
           "$_CURRENT_DOCKER_COMPOSE" || return 1

    fi
    ## Local cache hit: reuse (and opportunistically push) the image.
    if [ "$COMPOSE_ACTION" != "build" ] && docker_has_image "${charm_image_name}:${hash}"; then
        if [ -n "$DEBUG" ]; then
            Elt "using ${DARKPINK}$charm${NORMAL}'s image from local cache" >&2
            print_status noop >&2
            Feed >&2
        fi
        cache:image:registry:put "$charm" "$hash" "$service" || return 1
        printf "%s" "${charm_image_name}:${hash}" | tee "$cache_file"
        return $?
    fi

    ## Can we pull it ? Let's check on $COMPOSE_DOCKER_REGISTRY
    if [ "$COMPOSE_ACTION" != "build" ] && [ -n "$COMPOSE_DOCKER_REGISTRY" ]; then
        img=$(cache:image:registry:get "$charm" "$hash" "$service") || {
            err "Failed to get image '$charm_image_name:$hash' from registry for ${DARKYELLOW}$service${NORMAL}."
            return 1
        }
        [ -n "$img" ] && {
            printf "%s" "$img" | tee "$cache_file"
            return $?
        }
    fi
    cache:image:produce "$type" "$src" "$charm" "$hash" "$service" || return 1
    cache:image:registry:put "$charm" "$hash" "$service" || return 1
    printf "%s" "${charm_image_name}:$hash" | tee "$cache_file"
    return $?
}
export -f service_ensure_image_ready
|
|
|
|
|
|
## Print the definition of relation $2 as provided ('provides.<name>')
## by charm $1's metadata — empty when the charm does not provide it.
## Session-cached.
get_charm_relation_def () {
    local charm="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          relation_def metadata
    [ -e "$cache_file" ] && {
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    }
    metadata="$(charm.metadata "$charm")" || return 1
    relation_def="$(shyaml get-value "provides.${relation_name}" 2>/dev/null <<<"$metadata")"
    echo "$relation_def" | tee "$cache_file"
}
export -f get_charm_relation_def
|
|
|
|
|
|
## Print the 'tech-dep' orientation of relation $2 of charm $1,
## defaulting to "True" when unspecified.  Session-cached.
get_charm_tech_dep_orientation_for_relation() {
    local charm="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          relation_def value
    [ -e "$cache_file" ] && {
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    }
    relation_def=$(get_charm_relation_def "$charm" "$relation_name" 2>/dev/null)
    value=$(shyaml get-value 'tech-dep' 2>/dev/null <<<"$relation_def")
    printf "%s" "${value:-True}" | tee "$cache_file"
}
export -f get_charm_tech_dep_orientation_for_relation
|
|
|
|
|
|
## Resolve the 'tech-dep' orientation of relation $2 for service $1 by
## looking it up in the service's charm.  Session-cached.
get_service_relation_tech_dep() {
    local service="$1" relation_name="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          charm tech_dep
    [ -e "$cache_file" ] && {
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    }
    charm=$(get_service_charm "$service") || return 1
    tech_dep="$(get_charm_tech_dep_orientation_for_relation "$charm" "$relation_name")" || return 1
    printf "%s" "$tech_dep" | tee "$cache_file"
}
export -f get_service_relation_tech_dep
|
|
|
|
|
|
##
|
|
## Use compose file to get deps, and relation definition in metadata.yml
|
|
## for tech-dep attribute.
|
|
## Print (one per line) the target service of every relation of $1.
## Session-cached, keyed on the arguments and $ALL_RELATIONS.
get_service_deps() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$ALL_RELATIONS")"
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi
    ## Subshell so 'set -o pipefail' does not leak to the caller; the
    ## pipeline then fails when get_service_relations fails.
    (
        set -o pipefail
        get_service_relations "$service" | \
            while read-0 relation_name target_service _relation_config tech_dep; do
                echo "$target_service"
            done | tee "$cache_file"
    ) || return 1
}
export -f get_service_deps
|
|
|
|
## XXXvlab: cache was disabled because improper. Indeed, this needs to cache
|
|
## 'depths' full state. Second, it should be
|
|
## Recursively compute the dependency depth of service $1 into the
## caller-scope associative arrays 'depths' and 'visited' (bash dynamic
## scoping): depth = 1 + max depth of its dependencies.
## Disk-cached, keyed on the current 'depths' state (see the header note
## above: the caching strategy is acknowledged as imperfect).
_rec_get_depth() {
    local elt=$1 dep deps max
    ## Already computed: nothing to do.
    [ "${depths[$elt]}" ] && return 0
    local cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$(declare -pA depths)" "$ALL_RELATIONS")"
    if [ -e "$cache_file.depths" ]; then
        #debug "$FUNCNAME: cache hit ($*) - $cache_file.depths"
        ## Restore both maps from their serialized key/value dumps.
        ## NOTE(review): 'k' and 'v' are not declared local.
        while read-0 k v; do
            depths["$k"]="$v"
        done < "$cache_file.depths"
        while read-0 k v; do
            visited["$k"]="$v"
        done < "$cache_file.visited"
        return 0
    fi

    ## 'visited' guards against cycles in the relation graph.
    visited[$elt]=1
    #debug "Setting visited[$elt]"
    #debug "Asking for $DARKYELLOW$elt$NORMAL dependencies"
    deps=$(get_service_deps "$elt") || {
        debug "Failed get_service_deps $elt"
        return 1
    }
    # debug "$elt deps are:" $deps
    max=0
    for dep in $deps; do
        [ "${visited[$dep]}" ] && {
            #debug "Already computing $dep"
            continue
        }
        _rec_get_depth "$dep" || return 1
        #debug "Requesting depth[$dep]"
        if (( ${depths[$dep]} > max )); then
            max="${depths[$dep]}"
        fi
    done
    # debug "Setting depth[$elt] to $((max + 1))"
    depths[$elt]=$((max + 1))
    ## Persist both maps for future identical invocations.
    array_kv_to_stdin depths > "$cache_file.depths"
    array_kv_to_stdin visited > "$cache_file.visited"
    # debug "DEPTHS: $(declare -pA depths)"
    # debug "$FUNCNAME: caching hit ($*) - $cache_file"
}
export -f _rec_get_depth
|
|
|
|
|
|
## Print the given services and all their transitive dependencies,
## ordered by increasing dependency depth (dependencies first).
## Session-cached.
get_ordered_service_dependencies() {
    local services=("$@") cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$ALL_RELATIONS")" \
          i value key heads depths visited
    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: cache hit ($*)"
        cat "$cache_file"
        return 0
    fi

    #debug "Figuring ordered deps of $DARKYELLOW${services[@]}$NORMAL"
    if [ -z "${services[*]}" ]; then
        return 0
        # print_syntax_error "$FUNCNAME: no arguments"
        # return 1
    fi

    ## 'depths' and 'visited' are filled in by _rec_get_depth through
    ## bash dynamic scoping.
    declare -A depths
    declare -A visited
    heads=("${services[@]}")
    ## NOTE(review): 'head' (set by array_pop) is not declared local.
    while [ "${#heads[@]}" != 0 ]; do
        array_pop heads head
        _rec_get_depth "$head" || return 1
    done

    ## Emit services level by level, lowest depth first, consuming a
    ## (subshell) copy of 'depths' until it is empty.
    i=0
    while [ "${#depths[@]}" != 0 ]; do
        for key in "${!depths[@]}"; do
            value="${depths[$key]}"
            if [ "$value" == "$i" ]; then
                echo "$key"
                unset depths[$key]
            fi
        done
        ((i++))
    done | tee "$cache_file"
}
export -f get_ordered_service_dependencies
|
|
|
|
|
|
## Ensure the images of every service in "$@" and of all their
## (deduplicated) dependencies are available locally.
## NOTE(review): 'type', 'MASTER_BASE_SERVICE_NAME' and
## 'DOCKER_BASE_IMAGE' are set without 'local' and remain visible to the
## caller after the call.
run_service_acquire_images () {
    local service subservice subservices loaded
    declare -A loaded
    for service in "$@"; do
        subservices=$(get_ordered_service_dependencies "$service") || return 1
        for subservice in $subservices; do
            if [ "${loaded[$subservice]}" ]; then
                ## Prevent double inclusion of same service if this
                ## service is deps of two or more of your
                ## requirements.
                continue
            fi

            type=$(get_service_type "$subservice") || return 1
            MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$subservice") || return 1
            ## Stub services have no image to acquire.
            if [ "$type" != "stub" ]; then
                DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$MASTER_BASE_SERVICE_NAME") || return 1
            fi

            loaded[$subservice]=1
        done
    done
    return 0
}
|
|
|
|
## Run hook $1 (e.g. 'install') of every service among the remaining
## arguments and of all their (deduplicated) dependencies, with the
## standard charm environment variables exported for each run.
run_service_hook () {
    local action="$1" service subservice subservices loaded
    shift
    declare -A loaded
    for service in "$@"; do
        subservices=$(get_ordered_service_dependencies "$service") || return 1
        for subservice in $subservices; do
            if [ "${loaded[$subservice]}" ]; then
                ## Prevent double inclusion of same service if this
                ## service is deps of two or more of your
                ## requirements.
                continue
            fi

            charm=$(get_service_charm "$subservice") || return 1
            ## Services whose charm has no such hook are silently skipped.
            charm.has_hook "$charm" "$action" >/dev/null || continue
            type=$(get_service_type "$subservice") || return 1

            ## NOTE(review): 'charm', 'type' and the upper-case variables
            ## below are set without 'local' and leak to the caller.
            PROJECT_NAME=$(get_default_project_name) || return 1
            MASTER_BASE_SERVICE_NAME=$(get_top_master_service_for_service "$subservice") || return 1
            MASTER_BASE_CHARM_NAME=$(get_service_charm "$MASTER_BASE_SERVICE_NAME") || return 1
            if [ "$type" != "stub" ]; then
                DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$MASTER_BASE_SERVICE_NAME") || return 1
            fi

            ## The unquoted heredoc is expanded NOW (escaped '\$' parts
            ## are left for the inner shell run by 'Wrap').
            Wrap "${wrap_opts[@]}" -d "running $YELLOW$action$NORMAL hook of $DARKYELLOW$subservice$NORMAL in charm $DARKPINK$charm$NORMAL" <<EOF || return 1

export DOCKER_BASE_IMAGE="$DOCKER_BASE_IMAGE"
export SERVICE_NAME=$subservice
export IMAGE_NAME=$(echo "${PROJECT_NAME}" | tr -d "_-")_\${SERVICE_NAME}
export CONTAINER_NAME=\${IMAGE_NAME}_1
export CHARM_NAME="$charm"
export PROJECT_NAME="$PROJECT_NAME"
export SERVICE_DATASTORE="$DATASTORE/$subservice"
export SERVICE_CONFIGSTORE="$CONFIGSTORE/$subservice"
export MASTER_BASE_SERVICE_NAME="$MASTER_BASE_SERVICE_NAME"
export MASTER_BASE_CHARM_NAME="$MASTER_BASE_CHARM_NAME"

charm.run_hook "local" "$charm" "$action"

EOF
            loaded[$subservice]=1
        done
    done
    return 0
}
|
|
|
|
|
|
## Fetch the host resource at path $1 according to the YAML config $2,
## dispatching on its 'type' key to a 'host_resource_get_<type>' helper.
host_resource_get() {
    local location="$1" cfg="$2"
    ## NOTE: 'type' is deliberately not 'local', as before.
    type=$(shyaml get-value type 2>/dev/null <<<"$cfg") || {
        err "Missing ${WHITE}type$NORMAL option in ${WHITE}get$NORMAL config for location '$location'"
        return 1
    }
    if ! fn.exists host_resource_get_$type; then
        ## NOTE: '$subservice' comes from the caller's scope.
        err "Source ${WHITE}source$NORMAL type '$type' unknown for" \
            "${WHITE}host-resource$NORMAL '$location' defined in" \
            "$DARKYELLOW$subservice$NORMAL config."
        return 1
    fi
    host_resource_get_$type "$location" "$cfg"
}
export -f host_resource_get
|
|
|
|
|
|
## Fetch a host resource of type 'git': clone the configured url into
## path $1, on the configured branch (default 'master').
host_resource_get_git() {
    local location="$1" cfg="$2" branch parent url
    branch=$(shyaml get-value branch 2>/dev/null <<<"$cfg")
    url=$(shyaml get-value url 2>/dev/null <<<"$cfg")
    parent="$(dirname "$location")"
    (
        mkdir -p "$parent" &&
            cd "$parent" &&
            git clone -b "${branch:-master}" "$url" "$(basename "$location")"
    ) || return 1
}
export -f host_resource_get_git
|
|
|
|
|
|
## Fetch a host resource of type 'git-sub': clone with 'git sub' the
## configured url into path $1, on the configured branch (default
## 'master').
host_resource_get_git-sub() {
    local location="$1" cfg="$2" branch parent url
    branch=$(shyaml get-value branch 2>/dev/null <<<"$cfg")
    url=$(shyaml get-value url 2>/dev/null <<<"$cfg")
    parent="$(dirname "$location")"
    (
        mkdir -p "$parent" &&
            cd "$parent" &&
            git sub clone -b "${branch:-master}" "$url" "$(basename "$location")"
    ) || return 1
}
export -f host_resource_get_git-sub
|
|
|
|
|
|
## Make sure every 'host-resources' entry of service $1's compose
## definition exists on the host, fetching it (via host_resource_get)
## when its directory is absent.
setup_host_resource () {
    local subservice="$1" service_def location get cfg

    service_def=$(get_compose_service_def "$subservice") || return 1
    while read-0 location cfg; do
        ## XXXvlab: will it be a git resources always ?
        if [ -d "$location" -a ! -d "$location/.git" ]; then
            err "Hum, location '$location' does not seem to be a git directory."
            return 1
        fi
        if [ -d "$location" ]; then
            info "host resource '$location' already set up."
            continue
        fi
        ## Directory missing: a 'get' config is required to fetch it.
        get=$(echo "$cfg" | shyaml get-value get 2>/dev/null)

        if [ -z "$get" ]; then
            err "No host directory '$location' found, and no ${WHITE}source$NORMAL" \
                "specified for $DARKYELLOW$subservice$NORMAL."
            return 1
        fi
        host_resource_get "$location" "$get" || return 1
    done < <(echo "$service_def" | shyaml key-values-0 host-resources 2>/dev/null)
}
export -f setup_host_resource
|
|
|
|
|
|
## Set up the host resources of every service in "$@" and of all their
## (deduplicated) dependencies.
setup_host_resources () {
    local service subservices subservice loaded

    declare -A loaded
    for service in "$@"; do
        subservices=$(get_ordered_service_dependencies "$service") || return 1
        for subservice in $subservices; do
            ## A service can be a dependency of several of the requested
            ## services: handle it only once.
            [ "${loaded[$subservice]}" ] && continue
            setup_host_resource "$subservice" || return 1
            loaded[$subservice]=1
        done
    done
    return 0
}
export -f setup_host_resources
|
|
|
|
## Works on stdin
|
|
## Works on stdin: print the interpreted value of YAML key $1 read from
## stdin (the whole document when no key is given).
cfg-get-value () {
    local key="$1" out
    ## No key requested: interpret the whole stream directly.
    [ -n "$key" ] || {
        yaml_get_interpret || return 1
        return 0
    }
    out=$(shyaml -y get-value "$key" 2>/dev/null) || {
        err "The key $WHITE$key$NORMAL was not found in relation's data."
        return 1
    }
    printf "%s\n" "$out" | yaml_get_interpret
}
export -f cfg-get-value
|
|
|
|
|
|
## Print the interpreted value of key $1 from the current relation's
## data file ($RELATION_DATA_FILE).
relation-get () {
    local key="$1"
    cfg-get-value "$key" < "$RELATION_DATA_FILE"
}
export -f relation-get
|
|
|
|
|
|
## Expand shell variables/substitutions found in the content read on
## stdin, by feeding it through an (expanding) here-document whose
## delimiter is grown until it cannot collide with any content line.
expand_vars() {
    local unlikely_prefix="UNLIKELY_PREFIX"
    local content remaining_lines size_prefix first_matching next_char
    content=$(cat -)
    ## find first identifier not in content
    remaining_lines=$(echo "$content" | grep "^$unlikely_prefix")
    while [ "$(echo "$remaining_lines" | grep "^$unlikely_prefix$")" ]; do
        size_prefix="${#unlikely_prefix}"
        ## Most frequent continuation among lines sharing the prefix.
        first_matching=$(echo "$remaining_lines" |
                             grep -v "^$unlikely_prefix$" |
                             uniq -w "$((size_prefix + 1))" -c |
                             sort -rn |
                             head -n 1)
        ## Strip the leading whitespace then the count added by 'uniq -c'.
        ## (fix: previous code referenced an undefined variable 'x' here,
        ## leaving the whitespace in place and misaligning next_char)
        first_matching=${first_matching#"${first_matching%%[![:space:]]*}"}
        first_matching="${first_matching#* }"
        next_char=${first_matching:$size_prefix:1}
        ## Append a char differing from the most common continuation.
        if [ "$next_char" != "0" ]; then
            unlikely_prefix+="0"
        else
            unlikely_prefix+="1"
        fi
        remaining_lines=$(echo "$remaining_lines" | grep "^$unlikely_prefix")
    done
    eval "cat <<$unlikely_prefix
$content
$unlikely_prefix"
}
export -f expand_vars
|
|
|
|
|
|
## Interpret a YAML value read on stdin, honoring custom tags.
##
## Untagged values are echoed as-is.  Supported tags:
##   !bash-stdout  : run the value as bash code, emit its stdout
##   !var-expand   : expand shell variables found in the value
##   !file-content : expand variables, then dump the named file's content
yaml_get_interpret() {
    local content tag source
    content=$(cat -)
    tag=$(echo "$content" | shyaml get-type) || return 1
    content=$(echo "$content" | shyaml get-value) || return 1
    ## No custom tag: plain value.
    if ! [ "${tag:0:1}" == "!" ]; then
        echo "$content"
        return 0
    fi
    case "$tag" in
        "!bash-stdout")
            echo "$content" | bash || {
                err "shell code didn't end with errorlevel 0"
                return 1
            }
            ;;
        "!var-expand")
            echo "$content" | expand_vars || {
                err "shell expansion failed"
                return 1
            }
            ;;
        "!file-content")
            ## value is a (possibly variable-laden) path to a file
            source=$(echo "$content" | expand_vars) || {
                err "shell expansion failed"
                return 1
            }
            cat "$source" || return 1
            ;;
        *)
            err "Invalid object tag ${WHITE}$tag${NORMAL}"
            return 1
            ;;
    esac
}
export -f yaml_get_interpret
|
|
|
|
|
|
## Fetch and interpret "options.$key" from the compose definition of the
## current service (global SERVICE_NAME).
options-get () {
    local key="$1" out service_def
    ## fix: service_def was not declared local and leaked globally
    service_def=$(get_compose_service_def "$SERVICE_NAME") || return 1
    if ! out=$(echo "$service_def" | shyaml -y get-value "options.$key" 2>/dev/null); then
        err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
        return 1
    fi
    echo "$out" | yaml_get_interpret
}
export -f options-get
|
|
|
|
|
|
## Fetch and interpret "options.$key" from the base side's compose
## definition of the current relation (RELATION_BASE_COMPOSE_DEF).
relation-base-compose-get () {
    local key="$1" out
    out=$(echo "$RELATION_BASE_COMPOSE_DEF" | shyaml -y get-value "options.$key" 2>/dev/null) || {
        err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
        return 1
    }
    echo "$out" | yaml_get_interpret
}
export -f relation-base-compose-get
|
|
|
|
|
|
## Fetch and interpret "options.$key" from the target side's compose
## definition of the current relation (RELATION_TARGET_COMPOSE_DEF).
relation-target-compose-get () {
    local key="$1" out
    out=$(echo "$RELATION_TARGET_COMPOSE_DEF" | shyaml -y get-value "options.$key" 2>/dev/null) || {
        err "The key $WHITE$key$NORMAL was not found in base service compose definition.."
        return 1
    }
    echo "$out" | yaml_get_interpret
}
export -f relation-target-compose-get
|
|
|
|
|
|
## Store a key/value couple into the current relation's data file.
relation-set () {
    local key="$1" value="$2"
    [ -n "$RELATION_DATA_FILE" ] || {
        err "$FUNCNAME: relation does not seems to be correctly setup."
        return 1
    }
    [ -r "$RELATION_DATA_FILE" ] || {
        err "$FUNCNAME: can't read relation's data." >&2
        return 1
    }
    _config_merge "$RELATION_DATA_FILE" <(yaml_key_val_str "$key" "$value")
}
export -f relation-set
|
|
|
|
|
|
## Merge the YAML file "$mixin" into "$config_filename" in place
## (creating the destination if needed).
_config_merge() {
    local config_filename="$1" mixin="$2"
    touch "$config_filename" &&
        merge_yaml "$config_filename" "$mixin" > "$config_filename.tmp" || {
            ## fix: don't leave a stale .tmp file behind on failure
            rm -f "$config_filename.tmp"
            return 1
        }
    mv "$config_filename.tmp" "$config_filename"
}
export -f _config_merge
|
|
|
|
|
|
## XXXvlab; this can be used only in relation, I'd like to use it in init.
## Merge the given YAML snippet into the relation's config file.
config-add() {
    _config_merge "$RELATION_CONFIG" <(echo "$1")
}
export -f config-add
|
|
|
|
|
|
## XXXvlab; this can be used only in relation, I'd like to use it in init.
## Merge the given YAML snippet under 'services' into the session's
## docker-compose override file.
init-config-add() {
    _config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" \
                  <(yaml_key_val_str "services" "$1")
}
export -f init-config-add
|
|
|
|
|
|
## Print the uid of "$user" as seen inside the base image of "$service".
docker_get_uid() {
    local service="$1" user="$2" uid
    if ! uid=$(cached_cmd_on_base_image "$service" "id -u \"$user\""); then
        debug "Failed to query for '$user' uid in ${DARKYELLOW}$service${NORMAL} base image."
        return 1
    fi
    info "uid from ${DARKYELLOW}$service${NORMAL} for user '$user' is '$uid'"
    echo "$uid"
}
export -f docker_get_uid
|
|
|
|
## Print the uid of "$user" and the gid of "$group" (two lines) as seen
## inside the base image of "$service".
docker_get_uid_gid() {
    ## fix: was 'local ... uid' while the code uses 'uid_gid',
    ## which leaked into the caller's scope
    local service="$1" user="$2" group="$3" uid_gid
    uid_gid=$(cached_cmd_on_base_image "$service" "id -u \"$user\"; id -g \"$group\"") || {
        debug "Failed to query for '$user' uid in ${DARKYELLOW}$service${NORMAL} base image."
        return 1
    }
    info "uid from ${DARKYELLOW}$service${NORMAL} for user '$user' is '$uid_gid'"
    echo "$uid_gid"
}
export -f docker_get_uid_gid
|
|
|
|
|
|
## Prefix every stdin line with "<name>> " (stdout labelling filter).
logstdout() {
    local prefix="$1"
    sed -r "s%^%${prefix}> %g"
}
export -f logstdout
|
|
|
|
|
|
## Prefix every stdin line with a red "<name>>" label (stderr filter).
logstderr() {
    local prefix="$1"
    sed -r 's%^(.*)$%'"${RED}${prefix}>${NORMAL} \1"'%g'
}
export -f logstderr
|
|
|
|
|
|
## Run the 'relation-joined' hooks of relation "$relation_name" between
## "$service" (base/provider side) and "$target_service" (target side).
##
## The target-side hook runs first; the base-side hook runs only if the
## target side succeeded.  Each hook's config output (RELATION_CONFIG
## file) is merged into the session docker-compose override file.
## Returns 0 only when both sides completed without error.
_run_service_relation () {
    local relation_name="$1" service="$2" target_service="$3" relation_config="$4" relation_dir services
    local errlvl

    ## NOTE(review): charm, target_charm, *_script_name, target_errlvl,
    ## type, RELATION_CONFIG and RELATION_DATA are not declared 'local'
    ## and leak into the caller's scope — confirm that is intended.
    charm=$(get_service_charm "$service") || return 1
    target_charm=$(get_service_charm "$target_service") || return 1

    ## Empty when the charm has no 'relation-joined' hook for this relation.
    base_script_name=$(charm.has_relation_hook "$charm" "$relation_name" relation-joined) || true
    target_script_name=$(charm.has_relation_hook "$target_charm" "$relation_name" relation-joined) || true
    ## Nothing to do if neither side implements the hook.
    [ -n "$base_script_name" ] || [ -n "$target_script_name" ] || return 0

    relation_dir=$(get_relation_data_dir "$service" "$target_service" "$relation_name") || return 1
    RELATION_DATA_FILE=$(get_relation_data_file "$service" "$target_service" "$relation_name" "$relation_config") || return 1
    ## Environment contract consumed by the hook scripts.
    export BASE_SERVICE_NAME=$service
    export BASE_CHARM_NAME=$charm
    export BASE_CHARM_PATH=$(charm.get_dir "$charm")
    export TARGET_SERVICE_NAME=$target_service
    export TARGET_CHARM_NAME=$target_charm
    export TARGET_CHARM_PATH=$(charm.get_dir "$target_charm")
    export RELATION_DATA_FILE
    target_errlvl=0

    if [ -z "$target_script_name" ]; then
        verb "No relation script $DARKBLUE$relation_name$NORMAL in target $DARKPINK$target_charm$NORMAL."
    else
        verb "Running ${DARKBLUE}$relation_name${NORMAL} relation-joined script" \
             "for target $DARKYELLOW$target_service$NORMAL (charm $DARKPINK$target_charm$NORMAL)"
        RELATION_CONFIG="$relation_dir/config_provider"
        type=$(get_service_type "$target_service") || return 1
        if [ "$type" != "stub" ]; then
            DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$target_service") || return 1
        fi
        export DOCKER_BASE_IMAGE RELATION_CONFIG RELATION_DATA
        ## The hook runs in a subshell inside a pipeline, so its exit
        ## status is smuggled out through the 'target_errlvl' file.
        ## The 3>&1 1>&2 2>&3 pairs swap stdout/stderr so that stderr
        ## goes through logstderr, then swap them back.
        {
            (
                SERVICE_NAME=$target_service
                SERVICE_DATASTORE="$DATASTORE/$target_service"
                SERVICE_CONFIGSTORE="$CONFIGSTORE/$target_service"
                export SERVICE_NAME DOCKER_BASE_IMAGE SERVICE_DATASTORE SERVICE_CONFIGSTORE
                charm.run_relation_hook local "$target_charm" "$relation_name" relation-joined
                echo "$?" > "$relation_dir/target_errlvl"
            ) | logstdout "$DARKYELLOW$target_service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${GREEN}@${NORMAL}"
        } 3>&1 1>&2 2>&3 | logstderr "$DARKYELLOW$target_service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${RED}@${NORMAL}" 3>&1 1>&2 2>&3
        ## Missing errlvl file means the hook died before reporting.
        ## NOTE(review): the message below uses "$script_name", which is
        ## never set in this function (probably meant target_script_name).
        target_errlvl="$(cat "$relation_dir/target_errlvl")" || {
            err "Relation script '$script_name' in $DARKPINK$target_charm$NORMAL" \
                "failed before outputing an errorlevel."
            ((target_errlvl |= "1" ))
        }
        ## Fold any config produced by the hook into the compose override.
        if [ -e "$RELATION_CONFIG" ]; then
            debug "Merging some new config info in $DARKYELLOW$target_service$NORMAL"
            _config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" "$RELATION_CONFIG" &&
                rm "$RELATION_CONFIG"
            ((target_errlvl |= "$?"))
        fi
    fi

    ## Base side runs only after a successful target side.
    if [ "$target_errlvl" == 0 ]; then
        errlvl=0
        if [ "$base_script_name" ]; then
            verb "Running ${DARKBLUE}$relation_name${NORMAL} relation-joined script" \
                 "for $DARKYELLOW$service$NORMAL (charm $DARKPINK$charm$NORMAL)"
            RELATION_CONFIG="$relation_dir/config_providee"
            ## Base hook can read the data published by the target hook.
            RELATION_DATA="$(cat "$RELATION_DATA_FILE")"
            DOCKER_BASE_IMAGE=$(service_ensure_image_ready "$service") || return 1
            export DOCKER_BASE_IMAGE RELATION_CONFIG RELATION_DATA
            ## Same subshell/pipeline/fd-swap pattern as the target side.
            {
                (
                    SERVICE_NAME=$service
                    SERVICE_DATASTORE="$DATASTORE/$service"
                    SERVICE_CONFIGSTORE="$CONFIGSTORE/$service"
                    export SERVICE_NAME DOCKER_BASE_IMAGE SERVICE_DATASTORE SERVICE_CONFIGSTORE
                    charm.run_relation_hook local "$charm" "$relation_name" relation-joined
                    echo "$?" > "$relation_dir/errlvl"
                ) | logstdout "$DARKYELLOW$service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${GREEN}@${NORMAL}"
            } 3>&1 1>&2 2>&3 | logstderr "$DARKYELLOW$service$NORMAL/$DARKBLUE$relation_name$NORMAL (joined) ${RED}@$NORMAL" 3>&1 1>&2 2>&3
            errlvl="$(cat "$relation_dir/errlvl")" || {
                err "Relation script '$script_name' in $DARKPINK$charm$NORMAL" \
                    "failed before outputing an errorlevel."
                ((errlvl |= "1" ))
            }
            if [ -e "$RELATION_CONFIG" ]; then
                _config_merge "$state_tmpdir/to-merge-in-docker-compose.yml" "$RELATION_CONFIG" &&
                    rm "$RELATION_CONFIG"
                ((errlvl |= "$?" ))
            fi
            if [ "$errlvl" != 0 ]; then
                err "Relation $DARKBLUE$relation_name$NORMAL on $DARKYELLOW$service$NORMAL failed to run properly."
            fi
        else
            verb "No relation script '$script_name' in charm $DARKPINK$charm$NORMAL. Ignoring."
        fi
    else
        err "Relation $DARKBLUE$relation_name$NORMAL on $DARKYELLOW$target_service$NORMAL failed to run properly."
    fi

    ## errlvl stays empty (hence != 0) when the target side failed.
    if [ "$target_errlvl" == 0 -a "$errlvl" == 0 ]; then
        debug "Relation $DARKBLUE$relation_name$NORMAL is established" \
              "between $DARKYELLOW$service$NORMAL and $DARKYELLOW$target_service$NORMAL."
        return 0
    else
        return 1
    fi
}
export -f _run_service_relation
|
|
|
|
|
|
## Emit, NUL-separated, the quadruplets
##   (relation_name, target_service, relation_config, tech_dep)
## for every relation declared in the given compose service definition.
## Results are cached on disk, keyed on a hash of the arguments.
_get_compose_relations_cached () {
    local compose_service_def="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(echo "$*" | md5_compat)" \
          relation_name relation_def target_service
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: STATIC cache hit $1"
        ## 'touch' refreshes the mtime so cache cleanups keep hot entries.
        cat "$cache_file" &&
            touch "$cache_file" || return 1
        return 0
    fi

    ## Subshell so 'set -o pipefail' doesn't leak to the caller.
    (
        set -o pipefail
        if [ "$compose_service_def" ]; then
            while read-0 relation_name relation_def; do
                ## XXXvlab: could we use braces here instead of parenthesis ?
                (
                    ## A relation's value can be: a plain string (the
                    ## target service), a sequence of target services,
                    ## or a mapping target -> relation config.
                    case "$(echo "$relation_def" | shyaml get-type 2>/dev/null)" in
                        "str")
                            target_service="$(echo "$relation_def" | shyaml get-value 2>/dev/null)" || return 1
                            tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
                            printf "%s\0" "$relation_name" "$target_service" "" "$tech_dep"
                            ;;
                        "sequence")
                            while read-0 target_service; do
                                tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
                                printf "%s\0" "$relation_name" "$target_service" "" "$tech_dep"
                            done < <(echo "$relation_def" | shyaml get-values-0 2>/dev/null)
                            ;;
                        "struct")
                            while read-0 target_service relation_config; do
                                tech_dep="$(get_service_relation_tech_dep "$target_service" "$relation_name")" || return 1
                                printf "%s\0" "$relation_name" "$target_service" "$relation_config" "$tech_dep"
                            done < <(echo "$relation_def" | shyaml key-values-0 2>/dev/null)
                            ;;
                    esac
                ## Append each relation's output to the cache as we go.
                ) </dev/null >> "$cache_file" || return 1
            done < <(echo "$compose_service_def" | shyaml key-values-0 relations 2>/dev/null)
        fi
    )
    if [ "$?" != 0 ]; then
        err "Error while looking for compose relations."
        rm -f "$cache_file" ## no cache
        return 1
    fi
    ## The cache file may not exist when there were no relations at all.
    [ -e "$cache_file" ] && cat "$cache_file"
    return 0
}
export -f _get_compose_relations_cached
|
|
|
|
|
|
## Session-cached accessor to a service's compose relations (see
## _get_compose_relations_cached for the output format).
get_compose_relations () {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          compose_def

    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    compose_def="$(get_compose_service_def "$service")" || return 1
    if ! _get_compose_relations_cached "$compose_def" > "$cache_file"; then
        rm -f "$cache_file" ## no cache
        return 1
    fi
    cat "$cache_file"
}
export -f get_compose_relations
|
|
|
|
|
|
## List (one per line, deduplicated) every service appearing in
## $ALL_RELATIONS, whether as base or as target of a relation.
get_all_services() {
    local cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$ALL_RELATIONS")" \
          s rn ts rc td services
    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    if [ -z "$ALL_RELATIONS" ]; then
        err "Can't access global \$ALL_RELATIONS"
        return 1
    fi

    declare -A services
    while read-0 s _ ts _ _; do
        for service in "$s" "$ts"; do
            if [ -z "${services[$service]}" ]; then
                services["$service"]=1
                echo "$service"
            fi
        done
    done < <(cat "$ALL_RELATIONS") > "$cache_file"

    cat "$cache_file"
}
export -f get_all_services
|
|
|
|
|
|
## Emit, NUL-separated, the (relation_name, target_service,
## relation_config, tech_dep) quadruplets of $ALL_RELATIONS whose base
## service is "$service".
get_service_relations () {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$ALL_RELATIONS")" \
          s rn ts rc td
    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    if [ -z "$ALL_RELATIONS" ]; then
        err "Can't access global \$ALL_RELATIONS"
        return 1
    fi

    while read-0 s rn ts rc td; do
        if [[ "$s" == "$service" ]]; then
            printf "%s\0" "$rn" "$ts" "$rc" "$td"
        fi
    done < <(cat "$ALL_RELATIONS") > "$cache_file"

    cat "$cache_file"
}
export -f get_service_relations
|
|
|
|
|
|
## Emit, NUL-separated, (target_service, relation_config, tech_dep) for
## the first relation named "$relation" going out of "$service".
get_service_relation() {
    local service="$1" relation="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$1.$2" \
          rn ts rc td
    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    while read-0 rn ts rc td; do
        if [ "$relation" == "$rn" ]; then
            printf "%s\0" "$ts" "$rc" "$td"
            break
        fi
    done < <(get_service_relations "$service") > "$cache_file"
    if [ "$?" != 0 ]; then
        rm -f "$cache_file" ## no cache
        return 1
    fi
    cat "$cache_file"
}
export -f get_service_relation
|
|
|
|
|
|
## From a service and a relation, get all relations targeting given
## service with given relation.
##
## Returns a NUL separated list of couple of:
## (base_service, relation_data)
##
get_service_incoming_relations() {
    local service="$1" relation="$2" cache_file="$state_tmpdir/$FUNCNAME.cache.$(H "$@" "$ALL_RELATIONS")" \
          s rn ts rc td relation_data_file
    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: SESSION cache hit $1"
        cat "$cache_file"
        return 0
    fi
    if [ -z "$ALL_RELATIONS" ]; then
        err "Can't access global \$ALL_RELATIONS"
        return 1
    fi

    while read-0 s rn ts rc _td; do
        [[ "$ts" == "$service" ]] || continue
        [[ "$rn" == "$relation" ]] || continue
        relation_data_file=$(get_relation_data_file "$s" "$ts" "$rn" "$rc") || {
            ## fix: returning with a partially written cache file would
            ## make later calls serve truncated results as valid.
            rm -f "$cache_file"
            return 1
        }
        printf "%s\0" "$s" "$(cat "$relation_data_file")"
    done < <(cat "$ALL_RELATIONS") > "$cache_file"

    cat "$cache_file"
}
export -f get_service_incoming_relations
|
|
|
|
|
|
export TRAVERSE_SEPARATOR=:
## Traverse on first service satisfying relation
##
## Given "svc:rel1:rel2:...", start at svc and repeatedly follow the
## first service providing each named relation; print the final service.
service:traverse() {
    local service_path="$1"
    ## NOTE(review): 'service', 'relation' and 'ts' are not declared
    ## 'local'; 'SEPARATOR=:' appears unused here — confirm.
    {
        SEPARATOR=:
        ## First ':'-delimited token is the starting service.
        read -d "$TRAVERSE_SEPARATOR" service
        while read -d "$TRAVERSE_SEPARATOR" relation; do
            ## XXXvlab: Take only first service
            if ! read-0 ts _ _ < <(get_service_relation "${service}" "${relation}"); then
                err "Couldn't find relation ${DARKCYAN}${relation}${NORMAL}" \
                    "from ${DARKYELLOW}$service${NORMAL}."
                return 1
            fi
            service="$ts"
        done
        echo "$service"
    ## Trailing separator appended so the last token is also 'read'-able.
    } < <(e "${service_path}${TRAVERSE_SEPARATOR}")
}
export -f service:traverse
|
|
|
|
|
|
## Resolve a service path ("svc:rel1:...:relN") to the on-disk data file
## of its final relation, seeding the file from the relation config on
## first access.  Prints the file path.
service:relation-file() {
    local service_path="$1" relation service relation_file
    if ! [[ "$service_path" == *"$TRAVERSE_SEPARATOR"* ]]; then
        err "Invalid argument '$service_path'." \
            "Must provide a service path (no '${TRAVERSE_SEPARATOR}' found)."
        return 1
    fi
    ## Last path component is the relation to resolve...
    relation="${service_path##*${TRAVERSE_SEPARATOR}}"
    ## ...after traversing the leading components down to a service.
    service=$(service:traverse "${service_path%${TRAVERSE_SEPARATOR}*}") || return 1
    if ! read-0 ts rc _ < <(get_service_relation "${service}" "${relation}"); then
        err "Couldn't find relation ${DARKCYAN}${relation}${NORMAL}" \
            "from ${DARKYELLOW}$service${NORMAL}."
        return 1
    fi
    ## NOTE(review): 'ts', 'rc' and 'relation_dir' are not declared 'local'.
    relation_dir=$(get_relation_data_dir "$service" "$ts" "$relation") || {
        err "Failed to find relation file"
        return 1
    }
    relation_file="$relation_dir/data"
    if ! [ -e "$relation_file" ]; then
        ## First access: seed the data file with the relation's config.
        e "$rc" > "$relation_file"
        chmod go-rwx "$relation_file" ## protecting this file
    fi
    echo "$relation_file"
}
export -f service:relation-file
|
|
|
|
|
|
## Dump the raw data/options of the relation designated by the given
## service path (see service:relation-file).
service:relation-options() {
    local service_path="$1" relation_file
    if ! relation_file=$(service:relation-file "$service_path"); then
        err "Failed to find relation file"
        return 1
    fi
    cat "$relation_file"
}
export -f service:relation-options
|
|
|
|
|
|
## Query a key from the data of the relation designated by the given
## service path (see service:relation-file).
relation:get() {
    local service_path="$1" query="$2" relation_file
    if ! relation_file=$(service:relation-file "$service_path"); then
        err "Failed to find relation file"
        return 1
    fi
    cfg-get-value "$query" < "$relation_file"
}
export -f relation:get
|
|
|
|
|
|
|
|
## Extract the 'uses' section of a charm metadata document as
## NUL-separated key/value couples (empty output when absent).
## Disk-cached, keyed on a hash of the arguments.
_get_charm_metadata_uses() {
    local metadata="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)"
    if [ -e "$cache_file" ]; then
        if cat "$cache_file"; then
            return 0
        fi
        return 1
    fi

    printf "%s" "$metadata" | { shyaml key-values-0 uses 2>/dev/null || true; } | tee "$cache_file"
}
export -f _get_charm_metadata_uses
|
|
|
|
|
|
## Session-cached accessor to the metadata of a service's charm.
_get_service_metadata() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          charm

    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    charm="$(get_service_charm "$service")" || return 1
    if ! charm.metadata "$charm" > "$cache_file"; then
        rm -f "$cache_file" ## no cache
        return 1
    fi
    cat "$cache_file"
}
export -f _get_service_metadata
|
|
|
|
|
|
## Session-cached accessor to a service's 'uses' metadata, as
## NUL-separated (relation_name, relation_def) couples.
_get_service_uses() {
    local service="$1" cache_file="$state_tmpdir/$FUNCNAME.cache.$1" \
          metadata

    if [ -e "$cache_file" ]; then
        cat "$cache_file"
        return 0
    fi

    metadata="$(_get_service_metadata "$service")" || return 1
    if ! _get_charm_metadata_uses "$metadata" > "$cache_file"; then
        rm -f "$cache_file" ## no cache
        return 1
    fi
    cat "$cache_file"
}
export -f _get_service_uses
|
|
|
|
|
|
## Emit, NUL-separated, (service, relation_name, relation_def) triplets
## for the 'uses' declarations of every given service.  Session-cached.
_get_services_uses() {
    local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
          service rn rd

    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: SESSION cache hit $1"
        cat "$cache_file"
        return 0
    fi

    for service in "$@"; do
        _get_service_uses "$service" | while read-0 rn rd; do
            printf "%s\0" "$service" "$rn" "$rd"
        done
        ## Check the producer side of the pipeline above; the 'while'
        ## runs in a subshell so only PIPESTATUS reveals its failure.
        [ "${PIPESTATUS[0]}" == 0 ] || {
            return 1
        }
    done > "${cache_file}.wip"
    ## Atomic rename: a half-written '.wip' is never served as cache.
    mv "${cache_file}"{.wip,} &&
        cat "$cache_file" || return 1
}
export -f _get_services_uses
|
|
|
|
|
|
## Normalize a charm's 'provides' value — which may be a plain string,
## a sequence of providers, or a mapping provider -> config — into
## NUL-separated (provider, config) couples.  Disk-cached.
_get_provides_provides() {
    local provides="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
          service rn rd

    if [ -e "$cache_file" ]; then
        # debug "$FUNCNAME: CACHEDIR cache hit $1"
        cat "$cache_file"
        return 0
    fi

    ## NOTE(review): 'type', 'prov' are not declared 'local', and the
    ## error message below references "$charm" which is never set in
    ## this function — presumably inherited from the caller; verify.
    type=$(printf "%s" "$provides" | shyaml get-type)
    case "$type" in
        sequence)
            ## List of providers, no per-provider config.
            while read-0 prov; do
                printf "%s\0" "$prov" ""
            done < <(echo "$provides" | shyaml get-values-0)
            ;;
        struct)
            ## Mapping: provider -> config, already NUL-separated.
            printf "%s" "$provides" | shyaml key-values-0
            ;;
        str)
            printf "%s\0" "$(echo "$provides" | shyaml get-value)" ""
            ;;
        *)
            err "Unexpected type '$type' for provider identifier in charm '$charm'."
            return 1
    esac | tee "$cache_file"
    ## Propagate the case statement's status, not tee's.
    return "${PIPESTATUS[0]}"
}
|
|
|
|
|
|
## Extract and normalize the 'provides' section of a charm metadata
## document (see _get_provides_provides for the output format).
## Disk-cached; empty output when nothing is provided.
_get_metadata_provides() {
    local metadata="$1" cache_file="$CACHEDIR/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
          service rn rd

    if [ -e "$cache_file" ]; then
        #debug "$FUNCNAME: CACHEDIR cache hit"
        cat "$cache_file"
        return 0
    fi

    ## NOTE(review): 'provides' is not declared 'local'.
    provides=$(printf "%s" "$metadata" | shyaml -q get-value -y provides "")
    ## "''" is the YAML rendering of an empty value; cache empty result.
    [ "$provides" -a "$provides" != "''" ] || { touch "$cache_file"; return 0; }

    _get_provides_provides "$provides" | tee "$cache_file"
    ## Propagate the normalizer's status, not tee's.
    return "${PIPESTATUS[0]}"
}
|
|
|
|
_get_services_provides() {
|
|
local cache_file="$state_tmpdir/$FUNCNAME.cache.$(printf "%s\0" "$@" | md5_compat)" \
|
|
service rn rd
|
|
|
|
if [ -e "$cache_file" ]; then
|
|
#debug "$FUNCNAME: SESSION cache hit $1"
|
|
cat "$cache_file"
|
|
return 0
|
|
fi
|
|
|
|
## YYY: replace the inner loop by a cached function
|
|
for service in "$@"; do
|
|
metadata="$(_get_service_metadata "$service")" || return 1
|
|
|
|
|