
new: [0km,vps] add ``vps stats`` and ``0km vps-stats`` actions to monitor containers

Valentin Lab 1 year ago
commit e5b49fc16f
4 changed files:
1. README.org (39 lines changed)
2. bin/0km (374 lines changed)
3. bin/vps (478 lines changed)
4. etc/cron.d/monitor (4 lines changed)

README.org

@@ -311,6 +311,45 @@ compose --debug logs odoo
docker-ip
#+END_SRC
*** Getting resource usage statistics
**** Consumption
The ~docker stats~ command displays in real time the consumption of the
various resources (memory, CPU, network...).
Note that the ~vps stats~ command does the same thing, and additionally
fills the database keeping the usage history. This command is normally
run regularly by cron.
The usage databases are stored in ~/var/lib/vps/rrd~.
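These are plain RRD files, so they can also be inspected directly with
~rrdtool~. A minimal sketch (the exact file names depend on which
containers are running; ~load_avg.rrd~ and
~containers/<project>/<service>/memory.rrd~ are typical):
#+begin_src sh
## list the collected databases
find /var/lib/vps/rrd -name "*.rrd"
## show the archives kept in one of them, then dump the last hour of averages
rrdtool info /var/lib/vps/rrd/load_avg.rrd
rrdtool fetch /var/lib/vps/rrd/load_avg.rrd AVERAGE -s now-1h
#+end_src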
**** Resource consumption history
From ~0km~ it is possible to graph the statistics of one or more VPS:
#+begin_src sh
0km vps-stats [--timespan START[..END]] VPS [VPS...]
#+end_src
Examples:
#+begin_src sh
0km vps-stats vps-{01,02}.0k.io ## last day of data
0km vps-stats vps-{01,02}.0k.io -t e-1w ## end minus 1 week of data
0km vps-stats vps-{01,02}.0k.io -t e-5d ## end minus 5 days of data
0km vps-stats vps-01.0k.io -t n-3h..n-2h ## now minus 3h to now minus 2h
0km vps-stats vps-01.0k.io -t 17:40..17:50 ## from 17:40 to 17:50 (local time!)
0km vps-stats vps-01.0k.io -t "20230811..17:50" ## from the start of 2023-08-11 to 17:50 today
## live graph refreshing over the last 2 hours
0km vps-stats vps-01.0k.io -t "n-2h" -f
#+end_src
For more details on the start and end formats, see the end of the man
page of [[https://linux.die.net/man/1/rrdfetch][rrdfetch]].
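Under the hood, the ~START[..END]~ value is split and handed to
~rrdtool fetch~ as its ~--start~/~--end~ options, so any AT-STYLE time
specification accepted there also works here. A rough sketch of the
equivalence (the file path is only illustrative):
#+begin_src sh
## "0km vps-stats vps-01.0k.io -t n-3h..n-2h" boils down to fetches like:
rrdtool fetch rrd/vps-01.0k.io/load_avg.rrd AVERAGE -s n-3h -e n-2h
#+end_src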
*** Limiting the memory used by a container
Some containers will request a lot of memory by default and

bin/0km

@@ -89,6 +89,33 @@ ssh:open() {
trap_add EXIT,INT 'ssh:quit "$hostname"'
}
ssh:rsync() {
local src="$1" dst="$2"
hostname=${src%%:*}
hostname=${hostname#*@}
local rsync_ssh_options=(
-o ControlPath="/tmp/ssh-control-master-${master_pid}-$hostname"
-o ControlMaster=auto
-o ControlPersist=900
-o ConnectTimeout=10
-o StrictHostKeyChecking=no
)
if ! ssh:run "root@$hostname" -- type -p rsync </dev/null >/dev/null; then
info "No 'rsync' available on '$hostname'. Requesting installation..."
ssh:run "root@$hostname" -- apt-get install rsync -y || {
err "Installation of 'rsync' failed on '$hostname'"
return 1
}
fi
local cmd=(
rsync -e "ssh ${rsync_ssh_options[*]}"
-azvArH --delete --delete-excluded
--partial --partial-dir .rsync-partial
"$src" "$dst"
)
"${cmd[@]}"
}
ssh:open-try() {
local opts hostnames
@@ -1190,6 +1217,353 @@ cmdline.spec::cmd:vps-space:run() {
}
cmdline.spec.gnu vps-stats
cmdline.spec::cmd:vps-stats:run() {
: :posarg: [VPS...] 'Target host(s) to get stats'
: :optfla: --follow,-f 'Keep refreshing the graph (default: every 60s, see --interval)'
: :optval: --timespan,-t 'timespan START[..END]'
: :optval: --resource,-r 'resource(s) separated with a comma'
: :optval: --interval,-i 'refresh interval (default: 60s)'
local opts_rrdfetch=( -a )
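## split the "START[..END]" timespan into rrdfetch -s/-e options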
if [ -n "${opt_timespan}" ]; then
start=${opt_timespan%\.\.*}
opts_rrdfetch+=(-s "$start")
if [ "$start" != "${opt_timespan}" ]; then
end=${opt_timespan#*..}
opts_rrdfetch+=(-e "$end")
fi
fi
local resources=(c.memory c.network load_avg)
if [ -n "${opt_resource}" ]; then
resources=(${opt_resource//,/ })
fi
local not_found=()
for resource in "${resources[@]}"; do
if ! fn.exists "graph:def:$resource"; then
not_found+=("$resource")
fi
done
if [[ "${#not_found[@]}" -gt 0 ]]; then
not_found_msg=$(printf "%s, " "${not_found[@]}")
not_found_msg=${not_found_msg%, }
err "Unsupported resource(s) provided: ${not_found_msg}"
echo " resource must be one-of:" >&2
declare -F | egrep 'graph:def:[a-zA-Z_.]+$' | cut -f 3- -d " " | cut -f 3- -d ":" | prefix " - " >&2
return 1
fi
if [ "${#VPS[@]}" == 0 ]; then
err "You must provide a VPS list as positional arguments"
return 1
fi
include cache
if [ -z "$VAR_DIR" ]; then
err "Unset \$VAR_DIR, can't downlowd rrd graph"
return 1
fi
mkdir -p "$VAR_DIR/rrd"
if ! [ -d "$VAR_DIR/rrd" ]; then
err "Invalid \$VAR_DIR: '$VAR_DIR/rrd' is not a directory"
return 1
fi
(
for vps in "${VPS[@]}"; do
(
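## fd swaps (3>&1 1>&2 2>&3) below let 'prefix' tag error output with '!'
## and normal output with '|' while keeping the two streams separate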
{
{
ssh:open "root@$vps" 2>/dev/null || {
err "Can't open connection $vps."
return 1
}
while true; do
echo "${WHITE}Collecting stats${NORMAL}..."
{
{
ssh:rsync "root@$vps:/var/lib/vps/rrd/" "${VAR_DIR}/rrd/${vps}"
} 3>&1 1>&2 2>&3 | prefix " ${DARKRED}\!${NORMAL} "
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3 | prefix " ${GRAY}|${NORMAL} "
echo " ${GRAY}..${NORMAL} ${DARKGREEN}done${NORMAL} collecting stats"
[ -z "$opt_follow" ] && break
echo "${WHITE}Sleeping ${DARKYELLOW}${opt_interval:-60}${NORMAL}s..."
sleep "${opt_interval:-60}"
echo " ${GRAY}..${NORMAL} ${DARKGREEN}done${NORMAL} sleeping"
done
} 3>&1 1>&2 2>&3 | prefix " ${DARKRED}\!${GRAY} collect(${DARKCYAN}$vps${GRAY})${NORMAL} "
set_errlvl "${PIPESTATUS[0]}"
} 3>&1 1>&2 2>&3 | prefix " ${GRAY}| collect(${DARKCYAN}$vps${GRAY})${NORMAL} " >&2
) &
done
wait
) &
collect_pid="$!"
if [ -z "$opt_follow" ]; then
echo "${WHITE}Fetching last stats${NORMAL}${GRAY}..${NORMAL}" >&2
wait
echo " ${GRAY}..${DARKGREEN} done${NORMAL} fetching stats" >&2
else
collect_end_msg=" ${GRAY}..${NORMAL} ${DARKGREEN}stop${NORMAL} collecting daemon (pid: ${DARKYELLOW}$collect_pid${NORMAL})"
trap_add EXIT \
"printf '%s\n' \"$collect_end_msg\" && kill $collect_pid"
echo "${WHITE}Start collecting daemon${NORMAL} (pid: ${DARKYELLOW}$collect_pid${NORMAL}) ${GRAY}..${NORMAL}" >&2
fi
( depends gnuplot ) || {
echo ""
echo " Gnuplot is required to display graphs..." \
"You might want to try to install ${WHITE}gnuplot${NORMAL} with:"
echo ""
echo " apt install gnuplot"
echo ""
return 1
} >&2
export GNUTERM=qt
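## open fd $PFD as a pipe into a long-lived background gnuplot process;
## plot commands are written to it below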
exec {PFD}> >(exec gnuplot 2>/dev/null)
gnuplot_pid="$!"
if [ -z "$opt_follow" ]; then
echo "${WHITE}Draw gnuplot graph${GRAY}..${NORMAL}" >&2
else
gnuplot_end_msg=" ${GRAY}..${NORMAL} ${DARKGREEN}stop${NORMAL} gnuplot process (pid: $gnuplot_pid)"
trap_add EXIT \
"printf '%s\n' \"$gnuplot_end_msg\" && kill $gnuplot_pid"
echo "${WHITE}Start gnuplot process${NORMAL} (pid: $gnuplot_pid) ${GRAY}..${NORMAL}" >&2
fi
echo "set term qt noraise replotonresize" >&$PFD
while true; do
{
i=0
data_start_ts=
data_stop_ts=
for resource in "${resources[@]}"; do
for vps in "${VPS[@]}"; do
rrd_vps_path="$VAR_DIR/rrd/$vps"
[ -d "$rrd_vps_path" ] || {
warn "No data yet for vps '$vps'... Ignoring"
continue
}
((i++))
out=$(graph:def:"${resource}" "$vps" "$i" "${opts_rrdfetch[@]}")
printf "%s\n" "$out" >/tmp/toto.gnuplot
printf "%s\n" "$out"
done
done
} >&$PFD
if [ -z "$opt_follow" ]; then
echo " ${GRAY}..${DARKGREEN} done${NORMAL} gnuplot graphing" >&2
break
else
{
echo "${WHITE}Sleeping ${DARKYELLOW}${opt_interval:-60}${NORMAL}s..."
sleep "${opt_interval:-60}"
echo " ${GRAY}..${NORMAL} ${DARKGREEN}done${NORMAL} sleeping"
} | prefix " ${GRAY}| gnuplot:${NORMAL} " >&2
fi
done
if [ -n "$opt_follow" ]; then
echo "Waiting for child process to finish.." >&2
wait
echo " ..done" >&2
else
echo "pause mouse close" >&$PFD
fi
}
graph:def:c.memory() {
local vps="$1" i="$2"
shift 2
local opts_rrdfetch=("$@")
local resource="memory"
rrd_vps_path="$VAR_DIR/rrd/$vps"
[ -d "$rrd_vps_path/containers" ] || {
warn "No containers data yet for vps '$vps'... Ignoring"
return 0
}
containers=(
$(
cd "$rrd_vps_path/containers";
find -maxdepth 3 -mindepth 3 -name "${resource}.rrd" -type f |
sed -r 's%^./([^/]+/[^/]+)/[^/]+.rrd$%\1%g'
)
)
gnuplot_line_config=(
"set term qt $i title \"$vps $resource\" replotonresize noraise"
"set title '$vps'"
"set xdata time"
"set timefmt '%s'"
"set ylabel '$resource Usage'"
"set format y '%s'"
"set ytics format ' %g'"
"set mouse mouseformat 6"
"set yrange [0:*] "
"set border behind"
)
printf "%s\n" "${gnuplot_line_config[@]}"
first=1
for container in "${containers[@]}"; do
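## build a gnuplot "'< cmd'" datafile spec: rrdtool fetch output is streamed,
## its header line dropped, and "nan" values turned into missing data ("-")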
rrdfetch_cmd="'< rrdtool fetch \"$rrd_vps_path/containers/$container/$resource.rrd\""
rrdfetch_cmd+=" AVERAGE ${opts_rrdfetch[*]} | \\"$'\n'
rrdfetch_cmd+=" tail -n +2 | \\"$'\n'
rrdfetch_cmd+=" egrep -v \"^$\" | sed -r \"s/ -?nan/ -/g;s/^([0-9]+): /\\1 /g\"'"
rrdfetch_cmd_bash=$(eval echo "${rrdfetch_cmd}")
rrdfetch_cmd_bash=${rrdfetch_cmd_bash#< }
first_ts=
first_ts=$(eval "$rrdfetch_cmd_bash" | head -n 1 | cut -f 1 -d " ")
if [ -z "$first_ts" ]; then
warn "No data for $container on vps $vps, skipping..."
continue
fi
last_ts=$(eval "$rrdfetch_cmd_bash" | tail -n 1 | cut -f 1 -d " ")
if [[ -z "$data_start_ts" ]] || [[ "$data_start_ts" > "$first_ts" ]]; then
data_start_ts="$first_ts"
fi
if [[ -z "$data_stop_ts" ]] || [[ "$data_stop_ts" < "$last_ts" ]]; then
data_stop_ts="$last_ts"
fi
if [ -n "$first" ]; then
first=
echo "plot \\"
else
echo ", \\"
fi
container="${container//\'/}"
container="${container//@/\\@}"
echo -n " ${rrdfetch_cmd} u 1:((\$3 - \$2)/1000000000) w lines title '${container//_/\\_}'"
done
echo
}
graph:def:c.network() {
local vps="$1" i="$2"
shift 2
local opts_rrdfetch=("$@")
local resource="network"
rrd_vps_path="$VAR_DIR/rrd/$vps"
[ -d "$rrd_vps_path/containers" ] || {
warn "No containers data yet for vps '$vps'... Ignoring"
return 0
}
containers=(
$(
cd "$rrd_vps_path/containers";
find -maxdepth 3 -mindepth 3 -name "${resource}.rrd" -type f |
sed -r 's%^./([^/]+/[^/]+)/[^/]+.rrd$%\1%g'
)
)
gnuplot_line_config=(
"set term qt $i title \"$vps $resource\" replotonresize noraise"
"set title '$vps'"
"set xdata time"
"set timefmt '%s'"
"set ylabel '$resource Usage'"
"set format y '%s'"
"set ytics format ' %.2f MiB/s'"
"set mouse mouseformat 6"
"set yrange [0:*] "
"set border behind"
)
printf "%s\n" "${gnuplot_line_config[@]}"
first=1
for container in "${containers[@]}"; do
rrdfetch_cmd="'< rrdtool fetch \"$rrd_vps_path/containers/$container/$resource.rrd\""
rrdfetch_cmd+=" AVERAGE ${opts_rrdfetch[*]} | \\"$'\n'
rrdfetch_cmd+=" tail -n +2 | \\"$'\n'
rrdfetch_cmd+=" egrep -v \"^$\" | sed -r \"s/ -?nan/ -/g;s/^([0-9]+): /\\1 /g\"'"
rrdfetch_cmd_bash=$(eval echo "${rrdfetch_cmd}")
rrdfetch_cmd_bash=${rrdfetch_cmd_bash#< }
first_ts=
first_ts=$(eval "$rrdfetch_cmd_bash" | head -n 1 | cut -f 1 -d " ")
if [ -z "$first_ts" ]; then
warn "No data for $container on vps $vps, skipping..."
continue
fi
last_ts=$(eval "$rrdfetch_cmd_bash" | tail -n 1 | cut -f 1 -d " ")
if [[ -z "$data_start_ts" ]] || [[ "$data_start_ts" > "$first_ts" ]]; then
data_start_ts="$first_ts"
fi
if [[ -z "$data_stop_ts" ]] || [[ "$data_stop_ts" < "$last_ts" ]]; then
data_stop_ts="$last_ts"
fi
if [ -n "$first" ]; then
first=
echo "plot \\"
else
echo ", \\"
fi
container="${container//\'/}"
container="${container//@/\\@}"
echo -n " ${rrdfetch_cmd} u 1:((\$3 / 1024) / 1024) w lines title '${container//_/\\_}'"
done
echo
}
graph:def:load_avg() {
local vps="$1" i="$2"
shift 2
local opts_rrdfetch=("$@")
rrd_vps_path="$VAR_DIR/rrd/$vps"
[ -f "$rrd_vps_path/$resource.rrd" ] || {
warn "No containers data yet for vps '$vps'... Ignoring"
return 0
}
gnuplot_line_config=(
"set term qt $i title \"$vps $resource\" replotonresize noraise"
"set title '$vps'"
"set xdata time"
"set timefmt '%s'"
"set ylabel '${resource//_/\\_} Usage'"
"set format y '%s'"
"set ytics format '%g'"
"set mouse mouseformat 6"
"set yrange [0:*] "
"set border behind"
)
printf "%s\n" "${gnuplot_line_config[@]}"
first=1
for value in 1m:2 5m:3 15m:4; do
label="${value%:*}"
col_num="${value#*:}"
rrdfetch_cmd="'< rrdtool fetch \"$rrd_vps_path/$resource.rrd\""
rrdfetch_cmd+=" AVERAGE ${opts_rrdfetch[*]} | \\"$'\n'
rrdfetch_cmd+=" tail -n +2 | \\"$'\n'
rrdfetch_cmd+=" egrep -v \"^$\" | sed -r \"s/ -?nan/ -/g;s/^([0-9]+): /\\1 /g\"'"
rrdfetch_cmd_bash=$(eval echo "${rrdfetch_cmd}")
rrdfetch_cmd_bash=${rrdfetch_cmd_bash#< }
first_ts=
first_ts=$(eval "$rrdfetch_cmd_bash" | head -n 1 | cut -f 1 -d " ")
if [ -z "$first_ts" ]; then
warn "No data for $resource on vps $vps, skipping..."
continue
fi
last_ts=$(eval "$rrdfetch_cmd_bash" | tail -n 1 | cut -f 1 -d " ")
if [[ -z "$data_start_ts" ]] || [[ "$data_start_ts" > "$first_ts" ]]; then
data_start_ts="$first_ts"
fi
if [[ -z "$data_stop_ts" ]] || [[ "$data_stop_ts" < "$last_ts" ]]; then
data_stop_ts="$last_ts"
fi
if [ -n "$first" ]; then
first=
echo "plot \\"
else
echo ", \\"
fi
container="${container//\'/}"
container="${container//@/\\@}"
echo -n " ${rrdfetch_cmd} u 1:$col_num w lines title '${label}'"
done
echo
}
cmdline::parse "$@"

bin/vps

@@ -18,6 +18,8 @@ version=0.1
desc='Install backup'
help=""
version_gt() { test "$(printf '%s\n' "$@" | sort -V | head -n 1)" != "$1"; }
docker:running-container-projects() {
:cache: scope=session
@@ -600,6 +602,15 @@ compose:service:container_first() {
export -f compose:service:container_first
docker:running_containers() {
:cache: scope=session
docker ps --format="{{.ID}}"
}
decorator._mangle_fn docker:running_containers
export -f docker:running_containers
compose:project:containers() {
local project="$1" opts
@@ -1002,6 +1013,245 @@ EOF
return $errlvl
}
docker:api() {
local endpoint="$1"
curl -sS --unix-socket /var/run/docker.sock "http://localhost$endpoint"
}
docker:containers:id() {
docker:api /containers/json | jq -r ".[] | .Id"
}
docker:containers:names() {
docker:api /containers/json | jq -r '.[] | .Names[0] | ltrimstr("/")'
}
docker:container:stats() {
container="$1"
docker:api "/containers/$container/stats?stream=false"
}
docker:containers:stats() {
:cache: scope=session
local jobs='' line container id_names sha names name data service project
local DC="com.docker.compose"
local PSF_values=(
".ID" ".Names" ".Label \"$DC.project\"" ".Label \"$DC.service\"" ".Image"
)
local PSF="$(printf "{{%s}} " "${PSF_values[@]}")"
id_names=$(docker ps -a --format="$PSF") || return 1
## Create a docker container table from name/sha to service, project, image_name
declare -A resolve
while read-0a line; do
sha=${line%% *}; line=${line#* }
names=${line%% *}; line=${line#* }
names=(${names//,/ })
for name in "${names[@]}"; do
resolve["$name"]="$line"
done
resolve["$sha"]="$line"
done < <(printf "%s\n" "$id_names")
declare -A data
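## aggregate per-container counters under a "project/service" key
## (counters of several containers of the same service are summed)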
while read-0a line; do
name=${line%% *}; line=${line#* }
ts=${line%% *}; line=${line#* }
resolved="${resolve["$name"]}"
project=${resolved%% *}; resolved=${resolved#* }
service=${resolved%% *}; resolved=${resolved#* }
image_name="$resolved"
if [ -z "$service" ]; then
project="@"
service=$(docker inspect "$image_name" | jq -r '.[0].RepoTags[0]')
service=${service//\//_}
fi
if [ -n "${data["$project/$service"]}" ]; then
previous=(${data["$project/$service"]})
previous=(${previous[@]:1})
current=($line)
sum=()
i=0; max=${#previous[@]}
while (( i < max )); do
sum+=($((${previous[$i]} + ${current[$i]})))
((i++))
done
data["$project/$service"]="$ts ${sum[*]}"
else
data["$project/$service"]="$ts $line"
fi
done < <(
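## query the docker stats API for every requested container in parallel and
## flatten the memory/network counters into one "name ts v1 v2 ..." line each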
for container in "$@"; do
(
docker:container:stats "${container}" |
jq -r '
(.name | ltrimstr("/"))
+ " " + (.read | sub("\\.[0-9]+Z"; "Z") | fromdate | tostring)
+ " " + (.memory_stats.usage | tostring)
+ " " + (.memory_stats.stats.inactive_file | tostring)
+ " " + ((.memory_stats.usage - .memory_stats.stats.inactive_file) | tostring)
+ " " + (.memory_stats.limit | tostring)
+ " " + (.networks.eth0.rx_bytes | tostring)
+ " " + (.networks.eth0.rx_packets | tostring)
+ " " + (.networks.eth0.rx_errors | tostring)
+ " " + (.networks.eth0.rx_dropped | tostring)
+ " " + (.networks.eth0.tx_bytes | tostring)
+ " " + (.networks.eth0.tx_packets | tostring)
+ " " + (.networks.eth0.tx_errors | tostring)
+ " " + (.networks.eth0.tx_dropped | tostring)
'
) &
jobs=1
done
[ -n "$jobs" ] && wait
)
for label in "${!data[@]}"; do
echo "$label ${data[$label]}"
done
}
decorator._mangle_fn docker:containers:stats
export -f docker:containers:stats
col:normalize:size() {
local alignment=$1
awk -v alignment="$alignment" '{
# Store the entire line in the lines array.
lines[NR] = $0;
# Split the line into fields.
split($0, fields);
# Update max for each field.
for (i = 1; i <= length(fields); i++) {
if (length(fields[i]) > max[i]) {
max[i] = length(fields[i]);
}
}
}
END {
# Print lines with fields padded to max.
for (i = 1; i <= NR; i++) {
split(lines[i], fields);
line = "";
for (j = 1; j <= length(fields); j++) {
# Get alignment for the current field.
align = substr(alignment, j, 1);
if (align != "+") {
align = "-"; # Default to left alignment if not "+".
}
line = line sprintf("%" align max[j] "s ", fields[j]);
}
print line;
}
}'
}
rrd:create() {
local prefix="$1"
shift
local label="$1" step="300" src_def
shift
if [ -z "$VAR_DIR" ]; then
err "Unset \$VAR_DIR, can't create rrd graph"
return 1
fi
mkdir -p "$VAR_DIR"
if ! [ -d "$VAR_DIR" ]; then
err "Invalid \$VAR_DIR: '$VAR_DIR' is not a directory"
return 1
fi
if ! type -p rrdtool >/dev/null 2>&1; then
apt-get install rrdtool -y --force-yes </dev/null
if ! type -p rrdtool >/dev/null 2>&1; then
err "Couldn't find nor install 'rrdtool'."
return 1
fi
fi
local RRD_PATH="$VAR_DIR/rrd"
local RRD_FILE="$RRD_PATH/$prefix/$label.rrd"
mkdir -p "${RRD_FILE%/*}"
if [ -f "$RRD_FILE" ]; then
err "File '$RRD_FILE' already exists, use a different label."
return 1
fi
local rrd_ds_opts=()
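## DS heartbeat of 900s: tolerate up to 15mn between two updates
## before the value is recorded as unknown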
for src_def in "$@"; do
IFS=":" read -r name type min max rra_types <<<"$src_def"
rra_types=${rra_types:-average,max,min}
rrd_ds_opts+=("DS:$name:$type:900:$min:$max")
done
local step=120
local times=( ## with a 120s step, each datapoint covers 2mn
2m:1w
6m:3w
30m:12w
3h:1y
1d:10y
1w:2080w
)
rrd_rra_opts=()
for time in "${times[@]}"; do
rrd_rra_opts+=("RRA:"{AVERAGE,MIN,MAX}":0.5:$time")
done
cmd=(
rrdtool create "$RRD_FILE" \
--step "$step" \
"${rrd_ds_opts[@]}" \
"${rrd_rra_opts[@]}"
)
"${cmd[@]}" || {
err "Failed command: ${cmd[@]}"
return 1
}
}
rrd:update() {
local prefix="$1"
shift
while read-0a data; do
[ -z "$data" ] && continue
IFS="~" read -ra data <<<"${data// /\~}"
label="${data[0]}"
ts="${data[1]}"
for arg in "$@"; do
IFS="|" read -r name arg <<<"$arg"
rrd_label="${label}/${name}"
rrd_create_opt=()
rrd_update_opt="$ts"
for col_def in ${arg//,/ }; do
col=${col_def%%:*}; create_def=${col_def#*:}
rrd_update_opt="${rrd_update_opt}:${data[$col]}"
rrd_create_opt+=("$create_def")
done
local RRD_ROOT_PATH="$VAR_DIR/rrd"
local RRD_PATH="$RRD_ROOT_PATH/${prefix%/}"
local RRD_FILE="${RRD_PATH%/}/${rrd_label#/}.rrd"
if ! [ -f "$RRD_FILE" ]; then
info "Creating new RRD file '${RRD_FILE#$RRD_ROOT_PATH/}'"
if ! rrd:create "$prefix" "${rrd_label}" "${rrd_create_opt[@]}" </dev/null ; then
err "Couldn't create new RRD file ${rrd_label} with options: '${rrd_create_opt[*]}'"
return 1
fi
fi
rrdtool update "$RRD_FILE" "$rrd_update_opt" || {
err "update failed with options: '$rrd_update_opt'"
return 1
}
done
done
}
@@ -1728,4 +1978,232 @@ cmdline.spec::cmd:check-fix:run() {
}
awk:require() {
local require_at_least="$1" version already_installed
while true; do
if ! version=$(awk --version 2>/dev/null); then
version=""
else
version=${version%%,*}
version=${version##* }
fi
if [ -z "$version" ] || version_gt "$require_at_least" "$version"; then
if [ -z "$already_installed" ]; then
if [ -z "$version" ]; then
info "No 'gawk' available, probably using a clone. Installing 'gawk'..."
else
info "Found gawk version '$version'. Updating 'gawk'..."
fi
apt-get install gawk -y </dev/null || {
err "Failed to install 'gawk'."
return 1
}
already_installed=true
else
if [ -z "$version" ]; then
err "No 'gawk' available even after having installed one"
else
err "'gawk' version '$version' is lower than required" \
"'$require_at_least' even after updating 'gawk'."
fi
return 1
fi
continue
fi
return 0
done
}
cmdline.spec.gnu stats
cmdline.spec::cmd:stats:run() {
: :optval: --format,-f "Either 'silent', 'raw', or 'pretty', default is pretty."
: :optfla: --silent,-s "Shorthand for '--format silent'"
: :optval: --resource,-r 'resource(s) separated with a comma'
local project_name service_name containers container check
if [[ -n "${opt_silent}" ]]; then
if [[ -n "${opt_format}" ]]; then
err "'--silent' conflict with option '--format'."
return 1
fi
opt_format=s
fi
opt_format="${opt_format:-pretty}"
case "${opt_format}" in
raw|r)
opt_format="raw"
:
;;
silent|s)
opt_format="silent"
;;
pretty|p)
opt_format="pretty"
awk:require 4.1.4 || return 1
;;
*)
err "Invalid value '$opt_format' for option --format"
echo " use either 'raw' (shorthand 'r'), 'silent' (shorthand 's') or pretty (shorthand 'p')." >&2
return 1
esac
local resources=(c.{memory,network} load_avg)
if [ -n "${opt_resource}" ]; then
resources=(${opt_resource//,/ })
fi
local not_found=()
for resource in "${resources[@]}"; do
if ! fn.exists "stats:$resource"; then
not_found+=("$resource")
fi
done
if [[ "${#not_found[@]}" -gt 0 ]]; then
not_found_msg=$(printf "%s, " "${not_found[@]}")
not_found_msg=${not_found_msg%, }
err "Unsupported resource(s) provided: ${not_found_msg}"
echo " resource must be one-of:" >&2
declare -F | egrep -- '-fx? stats:[a-zA-Z0-9_.]+$' | cut -f 3- -d " " | cut -f 2- -d ":" | prefix " - " >&2
return 1
fi
:state-dir:
for resource in "${resources[@]}"; do
[ "$opt_format" == "pretty" ] && echo "${WHITE}$resource${NORMAL}:"
stats:"$resource" "$opt_format" 2>&1 | prefix " "
set_errlvl "${PIPESTATUS[0]}" || return 1
done
}
stats:c.memory() {
local format="$1"
local out
container_to_check=($(docker:running_containers)) || exit 1
out=$(docker:containers:stats "${container_to_check[@]}")
printf "%s\n" "$out" | rrd:update "containers" "memory|3:usage:GAUGE:U:U,4:inactive:GAUGE:U:U" || {
return 1
}
case "${format:-p}" in
raw|r)
printf "%s\n" "$out" | cut -f 1-5 -d " "
;;
pretty|p)
awk:require 4.1.4 || return 1
{
echo "container" "__total____" "buffered____" "resident____"
printf "%s\n" "$out" |
awk '
{
offset = strftime("%z", $2);
print $1, substr($0, index($0,$3));
}' | cut -f 1-4 -d " " |
numfmt --field 2-4 --to=iec-i --format=%8.1fB |
sed -r 's/(\.[0-9])([A-Z]?iB)/\1:\2/g' |
sort
} | col:normalize:size -+++ |
sed -r 's/(\.[0-9]):([A-Z]?iB)/\1 \2/g' |
header:make
;;
esac
}
stats:c.network() {
local format="$1"
local out
container_to_check=($(docker:running_containers)) || exit 1
out=$(docker:containers:stats "${container_to_check[@]}")
cols=(
{rx,tx}_{bytes,packets,errors,dropped}
)
idx=5 ## starting column idx for next fields
defs=()
for col in "${cols[@]}"; do
defs+=("$((idx++)):${col}:COUNTER:U:U")
done
OLDIFS="$IFS"
IFS="," defs="${defs[*]}"
IFS="$OLDIFS"
printf "%s\n" "$out" |
rrd:update "containers" \
"network|${defs}" || {
return 1
}
case "${format:-p}" in
raw|r)
printf "%s\n" "$out" | cut -f 1,2,7- -d " "
;;
pretty|p)
awk:require 4.1.4 || return 1
{
echo "container" "_" "_" "_" "RX" "_" "_" "_" "TX"
echo "_" "__bytes____" "__packets" "__errors" "__dropped" "__bytes____" "__packets" "__errors" "__dropped"
printf "%s\n" "$out" |
awk '
{
offset = strftime("%z", $2);
print $1, substr($0, index($0,$7));
}' |
numfmt --field 2,6 --to=iec-i --format=%8.1fB |
numfmt --field 3,4,5,7,8,9 --to=si --format=%8.1f |
sed -r 's/(\.[0-9])([A-Z]?(iB|B)?)/\1:\2/g' |
sort
} | col:normalize:size -++++++++ |
sed -r '
s/(\.[0-9]):([A-Z]?iB)/\1 \2/g;
s/(\.[0-9]):([KMGTPE])/\1 \2/g;
s/ ([0-9]+)\.0:B/\1 /g;
s/ ([0-9]+)\.0:/\1 /g;
' |
header:make 2
;;
esac
}
header:make() {
local nb_line="${1:-1}"
local line
while ((nb_line-- > 0)); do
read-0a line
echo "${GRAY}$(printf "%s" "$line" | sed -r 's/_/ /g')${NORMAL}"
done
cat
}
stats:load_avg() {
local format="$1"
local out
out=$(host:sys:load_avg)
printf "%s\n" "$out" | rrd:update "" "load_avg|2:load_avg_1:GAUGE:U:U,3:load_avg_5:GAUGE:U:U,4:load_avg_15:GAUGE:U:U" || {
return 1
}
case "${format:-p}" in
raw|r)
printf "%s\n" "$out" | cut -f 2-5 -d " "
;;
pretty|p)
{
echo "___1m" "___5m" "__15m"
printf "%s\n" "$out" | cut -f 3-5 -d " "
} | col:normalize:size +++ | header:make
;;
esac
}
host:sys:load_avg() {
local uptime
uptime="$(uptime)"
uptime=${uptime##*: }
uptime=${uptime//,/}
printf "%s " "" "$(date +%s)" "$uptime"
}
cmdline::parse "$@"

etc/cron.d/monitor

@@ -0,0 +1,4 @@
SHELL=/bin/bash
PATH=/usr/local/sbin:/usr/local/bin:/sbin:/bin:/usr/sbin:/usr/bin
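# every 2 minutes, run "vps stats -s" (wrapped in the "lock" helper) and send its output to syslog with tag "stats"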
*/2 * * * * root lock vps-stats -v -D -p 10 -k -c "vps stats -s" 2>&1 | logger -t stats