Compile tested: x86/64, aarch64_cortex-a53
Run tested: x86/64

Signed-off-by: Paul Spooren <spooren@informatik.uni-leipzig.de>
Signed-off-by: Paul Spooren <mail@aparcar.org>
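For anyone reproducing the compile test, the usual OpenWrt buildroot invocation looks roughly like this (a sketch assuming the package is picked up from the packages feed; the feed name and verbose flag are assumptions, not part of this patch):

    ./scripts/feeds update packages
    ./scripts/feeds install prometheus
    make defconfig
    make package/prometheus/{clean,compile} V=s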
@@ -0,0 +1,56 @@
include $(TOPDIR)/rules.mk

PKG_NAME:=prometheus
PKG_VERSION:=2.3.2
PKG_RELEASE:=1

PKG_SOURCE:=$(PKG_NAME)-$(PKG_VERSION).tar.gz
PKG_SOURCE_URL:=https://codeload.github.com/prometheus/prometheus/tar.gz/v$(PKG_VERSION)?
PKG_HASH:=008282497e2e85de6fb17a698dfdae4a942026f623d8a9d45b911a765442cb58

PKG_LICENSE:=Apache-2.0
PKG_LICENSE_FILES:=LICENSE
PKG_MAINTAINER:=Paul Spooren <spooren@informatik.uni-leipzig.de>

PKG_BUILD_DEPENDS:=golang/host
PKG_BUILD_PARALLEL:=1
PKG_USE_MIPS16:=0

GO_PKG:=github.com/prometheus/prometheus/
GO_PKG_BUILD_PKG:=github.com/prometheus/prometheus/cmd/prometheus/

include $(INCLUDE_DIR)/package.mk
include ../../lang/golang/golang-package.mk

define Package/prometheus/Default
  TITLE:=Monitoring system & time series database
  USERID:=prometheus=112:prometheus=112
  URL:=http://prometheus.io
  DEPENDS:=$(GO_ARCH_DEPENDS)
endef

define Package/prometheus
  $(call Package/prometheus/Default)
  SECTION:=utils
  CATEGORY:=Utilities
endef

define Package/prometheus/description
  Prometheus, a Cloud Native Computing Foundation project, is a systems and
  service monitoring system. It collects metrics from configured targets at given
  intervals, evaluates rule expressions, displays the results, and can trigger
  alerts if some condition is observed to be true.
endef

define Package/prometheus/install
	$(call GoPackage/Package/Install/Bin,$(1))
	$(CP) ./files/* $(1)/
endef

define Package/prometheus/conffiles
/etc/prometheus.yml
endef

$(eval $(call GoBinPackage,prometheus))
$(eval $(call BuildPackage,prometheus))
@@ -0,0 +1,27 @@
#!/bin/sh /etc/rc.common

START=70
USE_PROCD=1
PROG=/usr/bin/prometheus
CONFFILE=/etc/prometheus.yml

start_service() {
	local config_file
	local storage_tsdb_path
	local web_listen_address

	config_load "prometheus"
	config_get config_file prometheus config_file "$CONFFILE"
	config_get storage_tsdb_path prometheus storage_tsdb_path "/data"
	config_get web_listen_address prometheus web_listen_address "127.0.0.1:9090"

	procd_open_instance
	procd_set_param command "$PROG"
	procd_append_param command --config.file="$config_file"
	procd_append_param command --storage.tsdb.path="$storage_tsdb_path"
	procd_append_param command --web.listen-address="$web_listen_address"
	procd_append_param user "prometheus"
	procd_set_param file "$config_file"
	procd_set_param respawn
	procd_close_instance
}
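With the defaults seeded by the uci-defaults script further down, the procd instance above boils down to roughly this command line, run as user prometheus and respawned on exit (a sketch, assuming an unmodified /etc/config/prometheus):

    /usr/bin/prometheus \
        --config.file=/etc/prometheus.yml \
        --storage.tsdb.path=/data \
        --web.listen-address=127.0.0.1:9090

Because the config file is registered via procd_set_param file, a service reload restarts the instance when /etc/prometheus.yml changes.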
@@ -0,0 +1,29 @@
# my global config
global:
  scrape_interval:     15s # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

# Alertmanager configuration
alerting:
  alertmanagers:
  - static_configs:
    - targets:
      # - alertmanager:9093

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  # - "first_rules.yml"
  # - "second_rules.yml"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'

    # metrics_path defaults to '/metrics'
    # scheme defaults to 'http'.

    static_configs:
    - targets: ['localhost:9090']
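This default config only has Prometheus scrape itself on 127.0.0.1:9090; a quick on-device sanity check after starting the service could be (a sketch, assuming BusyBox wget is available):

    wget -qO- http://127.0.0.1:9090/metrics | head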
@@ -0,0 +1,13 @@
#!/bin/sh

[ -e /etc/config/prometheus ] || touch /etc/config/prometheus

uci -q get prometheus.prometheus || {
	uci -q batch <<EOF
		set prometheus.prometheus=prometheus
		set prometheus.prometheus.config_file='/etc/prometheus.yml'
		set prometheus.prometheus.storage_tsdb_path='/data'
		set prometheus.prometheus.web_listen_address='127.0.0.1:9090'
		commit prometheus
EOF
}
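The resulting /etc/config/prometheus would look like the snippet below, and overriding the listen address follows the usual uci workflow (a sketch; the 0.0.0.0:9090 value is only an example, not part of the patch):

    config prometheus 'prometheus'
        option config_file '/etc/prometheus.yml'
        option storage_tsdb_path '/data'
        option web_listen_address '127.0.0.1:9090'

    uci set prometheus.prometheus.web_listen_address='0.0.0.0:9090'
    uci commit prometheus
    /etc/init.d/prometheus restart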