Browse Source

collectd: SQM collect data from some qdiscs

Add a script that can collect data from mq (multi queue) and cake
qdiscs.

Script is reliant on collectd's 'exec' module.

Signed-off-by: Kevin Darbyshire-Bryant <ldir@darbyshire-bryant.me.uk>
lilik-openwrt-22.03
Kevin Darbyshire-Bryant 5 years ago
committed by Kevin Darbyshire-Bryant
parent
commit
38235de9c3
4 changed files with 159 additions and 0 deletions
  1. +28
    -0
      utils/collectd/Makefile
  2. +5
    -0
      utils/collectd/files/collectd.init
  3. +106
    -0
      utils/collectd/files/exec-scripts/sqm_collectd.sh
  4. +20
    -0
      utils/collectd/patches/910-add-cake-qdisc-types.patch

+ 28
- 0
utils/collectd/Makefile View File

@ -362,6 +362,32 @@ define BuildPlugin
endef
# BuildScriptPlugin: build a collectd plugin package whose implementation is
# one or more shell scripts run via collectd-mod-exec, rather than a compiled
# shared object (contrast with BuildPlugin above).
#   $(1) plugin name suffix (package becomes collectd-mod-$(1))
#   $(2) human-readable title
#   $(3) script basename(s), without the .sh extension
#   $(4) package dependencies
define BuildScriptPlugin
  PKG_CONFIG_DEPENDS+= CONFIG_PACKAGE_collectd-mod-$(1)

  define Package/collectd-mod-$(1)
    $$(call Package/collectd/Default)
    TITLE:=$(2) plugin
    DEPENDS:= $(4)
  endef

  # Install each named script into collectd's exec-script directory and, when
  # one exists, the plugin's JSON description file.
  # NOTE(review): the $$$$$$$$ run before ${m} is eaten by the successive
  # call/eval/recipe expansion layers so the shell finally sees a single
  # literal `$` — confirm against the other defines in this file before
  # changing the escaping.
  define Package/collectd-mod-$(1)/install
	$(INSTALL_DIR) $$(1)/usr/libexec/collectd
	for m in $(3); do \
		${INSTALL_BIN} \
			./files/exec-scripts/$$$$$$$${m}.sh $$(1)/usr/libexec/collectd/ ; \
	done
	if [ -f ./files/usr/share/collectd/plugin/$(1).json ]; then \
		$(INSTALL_DIR) $$(1)/usr/share/collectd/plugin ; \
		$(INSTALL_DATA) ./files/usr/share/collectd/plugin/$(1).json \
			$$(1)/usr/share/collectd/plugin/$(1).json ; \
	fi
  endef

  $$(eval $$(call BuildPackage,collectd-mod-$(1)))
endef
$(eval $(call BuildPackage,collectd))
#$(eval $(call BuildPlugin,NAME,DESCRIPTION,FILES,DEPENDENCIES))
@ -434,3 +460,5 @@ $(eval $(call BuildPlugin,vmem,virtual memory usage input,vmem,))
$(eval $(call BuildPlugin,wireless,wireless status input,wireless,))
$(eval $(call BuildPlugin,write-graphite,Carbon/Graphite output,write_graphite,+PACKAGE_collectd-mod-write-graphite:libpthread))
$(eval $(call BuildPlugin,write-http,HTTP POST output,write_http,+PACKAGE_collectd-mod-write-http:libcurl))
$(eval $(call BuildScriptPlugin,sqm,SQM/qdisc collection,sqm_collectd,+PACKAGE_collectd-mod-sqm:collectd-mod-exec))

+ 5
- 0
utils/collectd/files/collectd.init View File

@ -11,6 +11,11 @@ NICEPRIO=5
CONFIG_STRING=""
# Normalize permissions/ownership of the exec scripts: force mode 0500 and
# nobody:nogroup. NOTE(review): presumably this matches the user/group that
# collectd's exec plugin drops to when running these scripts — confirm against
# the Exec lines generated into collectd.conf.
[ -d /usr/libexec/collectd ] && {
	find /usr/libexec/collectd ! -perm 0500 -exec chmod 0500 '{}' '+'
	find /usr/libexec/collectd ! \( -user nobody -a -group nogroup \) -exec chown nobody:nogroup '{}' '+'
}
process_exec() {
printf "<Plugin exec>\n" >> "$COLLECTD_CONF"
config_foreach process_exec_sections exec_input "Exec"


+ 106
- 0
utils/collectd/files/exec-scripts/sqm_collectd.sh View File

@ -0,0 +1,106 @@
#!/bin/sh
# Collect SQM/qdisc statistics (cake and mq qdiscs) and print them in the
# PUTVAL format consumed by collectd's exec plugin, which runs this script.

# jshn: OpenWrt's shell JSON helpers (json_load, json_select, json_get_vars...)
. /usr/share/libubox/jshn.sh

# The exec plugin exports COLLECTD_HOSTNAME/COLLECTD_INTERVAL to its children;
# fall back to sane defaults so the script can also be run by hand.
HOSTNAME="${COLLECTD_HOSTNAME:-localhost}"
INTERVAL="${COLLECTD_INTERVAL:-60.000}"
# Emit statistics for a cake qdisc: overall counters plus per-tin detail.
# $1 - interface name. Expects the qdisc's JSON object to already be loaded
# into the jshn state (done by process_qdisc).
handle_cake() {
	local ifc ifr tin i
	ifc="$1"
	# Replace '.' with '_' so the interface name can be embedded in a shell
	# variable name (used by the eval bookkeeping below).
	ifr="${ifc//./_}"

	# Overall
	json_get_vars bytes packets drops backlog qlen

	# Options
	json_select options
	json_get_vars bandwidth diffserv
	json_select ".."

	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_bytes\" interval=$INTERVAL N:$bytes"
	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_drops\" interval=$INTERVAL N:$drops"
	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_backlog\" interval=$INTERVAL N:$backlog"

	# Tin labels packed into one string as fixed 2-character slices, because
	# ash doesn't have arrays so prepare to get a little creative
	case "$diffserv" in
	diffserv3 | diffserv4) tns="BKBEVIVO"
		;;
	*) tns="T0T1T2T3T4T5T6T7"
		;;
	esac

	# Tins
	# Flows & delays indicate the state as of the last packet that flowed through, so they appear to get stuck.
	# Discard the results from a stuck tin.
	json_get_keys tins tins
	json_select tins
	i=0
	for tin in $tins; do
		json_select "$tin"
		json_get_vars threshold_rate sent_bytes sent_packets backlog_bytes target_us peak_delay_us avg_delay_us base_delay_us drops ecn_mark ack_drops sparse_flows bulk_flows unresponsive_flows
		# Previous sent_packets for this iface/tin, kept in a dynamically
		# named global (osp<iface>t<tin>) — again standing in for an array.
		eval osp="\$osp${ifr}t${i}"
		if [ "$osp" ] && [ "$osp" -eq "$sent_packets" ] ; then
			# Tin is stuck (no packets since the last poll): zero the
			# last-packet-derived values rather than repeating stale ones.
			peak_delay_us=0; avg_delay_us=0; base_delay_us=0
			sparse_flows=0; bulk_flows=0; unresponsive_flows=0
		else
			eval "osp${ifr}t${i}=$sent_packets"
		fi
		# 2-char tin label: slice tns at offset 2*i
		tn=${tns:$((i<<1)):2}
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_bytes-$tn\" interval=$INTERVAL N:$sent_bytes"
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_thres-$tn\" interval=$INTERVAL N:$threshold_rate"
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_drops-$tn\" interval=$INTERVAL N:$drops:$ecn_mark:$ack_drops"
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_backlog-$tn\" interval=$INTERVAL N:$backlog_bytes"
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_flows-$tn\" interval=$INTERVAL N:$sparse_flows:$bulk_flows:$unresponsive_flows"
		echo "PUTVAL \"$HOSTNAME/sqmcake-$ifc/qdisct_latencyus-$tn\" interval=$INTERVAL N:$target_us:$peak_delay_us:$avg_delay_us:$base_delay_us"
		json_select ..
		i=$((i+1))
	done
	json_select ..
}
# Emit overall statistics for an mq (multi-queue) root qdisc.
# $1 - interface name. Expects the qdisc's JSON object to already be loaded
# into the jshn state (done by process_qdisc).
handle_mq() {
	# Declare locals so json_get_vars (dynamic scoping) doesn't clobber
	# globals — matches the convention used by handle_cake above.
	local ifc bytes drops backlog
	ifc="$1"
	# Overall
	json_get_vars bytes drops backlog
	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_bytes\" interval=$INTERVAL N:$bytes"
	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_drops\" interval=$INTERVAL N:$drops"
	echo "PUTVAL \"$HOSTNAME/sqm-$ifc/qdisc_backlog\" interval=$INTERVAL N:$backlog"
}
# Query one interface's root qdisc via 'tc -s -j' and dispatch to the matching
# per-qdisc-type handler.
# $1 - interface name
process_qdisc() {
	# 'qdisc' added to the locals: it previously leaked into (and clobbered)
	# the global scope.
	local ifc jsn qdisc
	ifc="$1"
	jsn=$(tc -s -j qdisc show dev "$ifc")
	# tc emits a JSON array; jshn wants a bare object, so
	# strip leading & trailing []
	jsn="${jsn#[}" ; jsn="${jsn%]}"
	json_load "${jsn}"
	json_get_var qdisc kind
	case "$qdisc" in
	cake) handle_cake "$ifc"
		;;
	mq) handle_mq "$ifc"
		;;
	*) echo "Unknown qdisc type" 1>&2
		;;
	esac
	json_cleanup
}
# Poll forever: emit stats for each interface named on the command line,
# then sleep for one collection interval.
while : ; do
	for dev in "$@" ; do
		process_qdisc "$dev"
	done
	# collectd supplies INTERVAL like "60.000"; trim that suffix for sleep
	sleep "${INTERVAL%.000}"
done

+ 20
- 0
utils/collectd/patches/910-add-cake-qdisc-types.patch View File

@ -0,0 +1,20 @@
--- a/src/types.db
+++ b/src/types.db
@@ -303,6 +303,17 @@ vs_memory value:GAUGE:0:92
vs_processes value:GAUGE:0:65535
vs_threads value:GAUGE:0:65535
+qdisc_bytes value:DERIVE:0:U
+qdisc_drops value:DERIVE:0:U
+qdisc_backlog value:GAUGE:0:U
+
+qdisct_bytes value:DERIVE:0:U
+qdisct_thres value:GAUGE:0:U
+qdisct_drops drops:DERIVE:0:U, ecn:DERIVE:0:U, ack:DERIVE:0:U
+qdisct_backlog value:GAUGE:0:U
+qdisct_latencyus tg:GAUGE:0:U, pk:GAUGE:0:U, av:GAUGE:0:U, sp:GAUGE:0:U
+qdisct_flows sp:GAUGE:0:U, bu:GAUGE:0:U, un:GAUGE:0:U
+
#
# Legacy types
# (required for the v5 upgrade target)

Loading…
Cancel
Save