Commit fc8910b7 authored by Christian Elberfeld
parents 0f0eb7e9 e270beb4
Showing 785 additions and 50 deletions
---
-# Pakete installieren
-- name: pakete installieren
+- name: stop prometheus-node-exporter
+  service: name=prometheus-node-exporter state=stopped
+# Pakete deinstallieren
+- name: pakete deinstallieren
  apt:
    pkg: "{{ item }}"
-    update_cache: yes
+    update_cache: no
-    state: installed
+    state: absent
  with_items:
    - prometheus-node-exporter
-- name: reload systemd and enable service
-  systemd:
-    name: prometheus-node-exporter
-    enabled: yes
-    daemon_reload: yes
-- name: restart prometheus-node-exporter
-  service: name=prometheus-node-exporter state=restarted
+- name: restart telegraf
+  service: name=telegraf state=restarted
---
# Pakete installieren
- name: pakete installieren
apt:
pkg: "{{ item }}"
update_cache: yes
state: installed
with_items:
- telegraf
- name: pakete installieren (host_type = physical)
apt:
pkg: "{{ item }}"
update_cache: yes
state: installed
with_items:
- hddtemp
- lm-sensors
- smartmontools
when: host_type == 'physical'
- name: create main config
template: src={{ item }} dest=/etc/telegraf/{{ item }}
with_items:
- telegraf.conf
notify: restart telegraf
- name: reload systemd and enable service
command: systemctl enable telegraf
notify: restart telegraf
- name: delete config file 1
file:
path: "/etc/telegraf/telegraf.d/outputs-influxdb.conf"
state: absent
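A quick manual check of the result of these tasks on a target host (a minimal sketch, not part of the commit):
# Confirm the service is enabled and running, and that the old drop-in is gone
systemctl is-enabled telegraf
systemctl status telegraf --no-pager
ls -l /etc/telegraf/telegraf.d/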
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
server = "true"
host_type = "{{host_type}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "5s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = "{{ inventory_hostname }}"
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
urls = ["{{influxdb_sysmon.url}}"]
## The target database for metrics; will be created as needed.
database = "{{influxdb_sysmon.db}}"
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
skip_database_creation = true
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy.
# retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
username = "{{influxdb_sysmon.user}}"
password = "{{influxdb_sysmon.password}}"
## HTTP User-Agent
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override; if unset, the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
[[inputs.interrupts]]
# no configuration
[[inputs.linux_sysctl_fs]]
# no configuration
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Setting mount_points will restrict the stats to only the specified mount points.
mount_points = ["/", "/srv"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "sysfs", "overlay" ]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Read metrics about network interface usage
[[inputs.net]]
## By default, telegraf gathers stats from any up interface (excluding loopback)
## Setting interfaces will tell it to gather these explicit interfaces,
## regardless of status.
##
interfaces = ["eth*", "en*"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##
# Read metrics about network usage
[[inputs.netstat]]
# no configuration
# Collects conntrack stats from the configured directories and files.
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
{% if host_type == 'physical' %}
# Monitor disks' temperatures using hddtemp
[[inputs.hddtemp]]
## By default, telegraf gathers temperature data from all disks detected
## by hddtemp.
##
## Only collect temps from the selected disks.
##
## A * as the device name will return the temperature values of all disks.
##
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
# Read metrics from storage devices supporting S.M.A.R.T.
[[inputs.smart]]
## Optionally specify the path to the smartctl executable
# path = "/usr/bin/smartctl"
#
## On most platforms smartctl requires root access.
## Setting 'use_sudo' to true will make use of sudo to run smartctl.
## Sudo must be configured to allow the telegraf user to run smartctl
## without a password.
# use_sudo = false
#
## Skip checking disks in this power mode. Defaults to
## "standby" to not wake up disks that have stoped rotating.
## See --nocheck in the man pages for smartctl.
## smartctl version 5.41 and 5.42 have faulty detection of
## power mode and might require changing this value to
## "never" depending on your disks.
# nocheck = "standby"
#
## Gather detailed metrics for each SMART Attribute.
## Defaults to "false"
##
# attributes = false
#
## Optionally specify devices to exclude from reporting.
# excludes = [ "/dev/pass6" ]
#
## Optionally specify devices and device type. If unset,
## a scan (smartctl --scan) for S.M.A.R.T. devices will be
## done and all devices found will be included, except for
## those listed in excludes.
# devices = [ "/dev/ada0 -d atacam" ]
{% else %}
{% endif %}
\ No newline at end of file
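To sanity-check the rendered template on a target host, a one-off collection run can be done (a sketch, not part of the commit; the --input-filter example assumes a host with host_type == 'physical', where the hddtemp/sensors/smart inputs are enabled):
# Collect once and print metrics to stdout without writing to InfluxDB
telegraf --config /etc/telegraf/telegraf.conf --test
# Restrict the test run to the hardware-monitoring inputs
telegraf --config /etc/telegraf/telegraf.conf --test --input-filter hddtemp:sensors:smart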
@@ -6,5 +6,13 @@ ldap_ip_ext: 10.0.20.2
# int ist noch ungenutzt / später replikation in der Zone
ldap_ip_int: 10.0.20.2
-ldap_base_dn: DC=warpzone,DC=ms
-ldap_readonly_bind_dn: CN=readonly,DC=warpzone,DC=ms
\ No newline at end of file
+ldap_base_dn: dc=warpzone,dc=ms
+ldap_readonly_bind_dn: cn=readonly,dc=warpzone,dc=ms
+
+# Zentrale InfluxDb für Systemmonitoring
+influxdb_sysmon:
+  url: "http://192.168.0.201:18086"
+  db: "influx"
+  user: "influx"
+  password: "influx"
@@ -10,6 +10,10 @@ debian_sources:
debian_keys:
+# Art des Hosts: physical, vm, docker
+host_type: "physical"
administratorenteam:
- "ole"
- "larsm"
...
@@ -14,6 +14,9 @@ debian_keys:
- "https://homegear.eu/packages/Release.key"
- "https://bintray.com/user/downloadSubjectPublicKey?username=openhab"
+# Art des Hosts: physical, vm, docker
+host_type: "physical"
administratorenteam:
- "user51"
- "void"
...
@@ -12,6 +12,9 @@ debian_sources:
debian_keys:
+# Art des Hosts: physical, vm, docker
+host_type: "physical"
administratorenteam:
- "void"
- "sandhome"
...
@@ -11,10 +11,17 @@ debian_sources:
- "deb http://debian.uni-duisburg-essen.de/debian/ jessie-updates main contrib non-free"
- "deb http://ftp.debian.org/debian jessie-backports main"
- "deb https://apt.dockerproject.org/repo debian-jessie main"
-- "deb [arch=amd64] https://download.docker.com/linux/debian jessie stable"
+- "deb https://download.docker.com/linux/debian jessie stable"
+- "deb https://repos.influxdata.com/debian stretch stable"
debian_keys:
- "https://download.docker.com/linux/debian/gpg"
+- "https://repos.influxdata.com/influxdb.key"
+# Art des Hosts: physical, vm, docker
+host_type: "physical"
webserver_domains:
- "infra"
...
@@ -10,10 +10,16 @@ debian_sources:
- "deb http://security.debian.org/ stretch/updates main contrib non-free"
- "deb http://ftp.de.debian.org/debian/ stretch-updates main"
- "deb http://ftp.halifax.rwth-aachen.de/debian/ stretch-updates main contrib non-free"
-- "deb [arch=amd64] https://download.docker.com/linux/debian stretch stable"
+- "deb https://download.docker.com/linux/debian stretch stable"
+- "deb https://repos.influxdata.com/debian stretch stable"
debian_keys:
- "https://download.docker.com/linux/debian/gpg"
+- "https://repos.influxdata.com/influxdb.key"
+# Art des Hosts: physical, vm, docker
+host_type: "vm"
letsencrypt_tos_sha256: 6373439b9f29d67a5cd4d18cbc7f264809342dbf21cb2ba2fc7588df987a6221
@@ -21,6 +27,8 @@ letsencrypt_tos_sha256: 6373439b9f29d67a5cd4d18cbc7f264809342dbf21cb2ba2fc7588df
letsencrypt_mail: verwaltung@warpzone.ms
webserver_domains:
+- "auth"
+- "alerta"
- "gitlab"
- "infra"
- "infra-test"
@@ -32,6 +40,7 @@ webserver_domains:
- "proxy.jabber-test"
- "ldap"
- "mattermost"
+- "md"
- "pad"
- "wiki"
- "www"
...
---
- include: ../functions/get_secret.yml
with_items:
- { path: /srv/influx/influx_admin_pw, length: 24 }
- name: create folder struct for influx
file:
path: "{{ item }}"
state: "directory"
with_items:
- /srv/influx/
- /srv/influx/sysmon/
- name: Konfig-Dateien erstellen
template:
src: "{{ item }}"
dest: "/srv/influx/{{ item }}"
with_items:
- docker-compose.yml
- name: Script-Dateien erstellen
template:
src: "{{ item }}"
dest: "/srv/influx/{{ item }}"
mode: "o+rwx"
with_items:
- influx_sysmon.sh
- name: start influx docker
docker_service:
project_src: /srv/influx/
state: present
version: "3"
services:
sysmon:
image: influxdb:1.5.3
restart: always
ports:
- 0.0.0.0:18086:8086
volumes:
- /srv/influx/sysmon/:/var/lib/influxdb
environment:
INFLUXDB_DB: "{{influxdb_sysmon.db}}"
INFLUXDB_ADMIN_USER: "admin"
INFLUXDB_ADMIN_PASSWORD: "{{ influx_admin_pw }}"
INFLUXDB_USER: "{{influxdb_sysmon.user}}"
INFLUXDB_USER_PASSWORD: "{{influxdb_sysmon.password}}"
INFLUXDB_HTTP_AUTH_ENABLED: "true"
#!/bin/sh
# Influx Admin Konsole
# Usage: ./influx_sysmon.sh
docker-compose exec sysmon influx -database "{{influxdb_sysmon.db}}" -password "{{ influx_admin_pw }}" -username "admin"
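The same console can also be used non-interactively; a sketch run from /srv/influx, assuming get_secret leaves the admin password in /srv/influx/influx_admin_pw:
# One-off query instead of an interactive session
docker-compose exec sysmon influx -username admin -password "$(cat /srv/influx/influx_admin_pw)" -execute 'SHOW DATABASES'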
---
- name: create folder struct for influxdb
file:
path: "{{ item }}"
state: "directory"
with_items:
- "/srv/influxdb/"
- "/srv/influxdb/data/"
- name: Docker Compose Konfig-Datei erstellen
template:
src: "docker-compose.yml"
dest: "/srv/influxdb/docker-compose.yml"
- name: start influxdb docker
docker_service:
project_src: /srv/influxdb/
state: present
version: "3"
services:
db:
image: influxdb:1.5.1
restart: always
ports:
- 8086:8086
- 2003:2003
volumes:
- /srv/influxdb/data/:/var/lib/influxdb
environment:
INFLUXDB_GRAPHITE_ENABLED: "true"
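Because INFLUXDB_GRAPHITE_ENABLED is set, the container also accepts Graphite line protocol on port 2003; a hedged smoke test from the Docker host (assumes a netcat that supports -q, e.g. netcat-traditional on Debian):
# Graphite plaintext format: <metric.path> <value> <unix-timestamp>
echo "test.smoke.metric 42 $(date +%s)" | nc -q0 localhost 2003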
---
- name: restart kapacitor docker
docker_service:
project_src: /srv/kapacitor/
state: present
restarted: yes
---
- include: ../functions/get_secret.yml
with_items:
- { path: /srv/influx/influx_admin_pw, length: 24 }
- { path: /srv/kapacitor/alerta_token, length: -1 }
- name: create folder struct for kapacitor
file:
path: "{{ item }}"
state: "directory"
with_items:
- /srv/kapacitor/
- /srv/kapacitor/data/
- /srv/kapacitor/load/
- /srv/kapacitor/load/tasks/
- /srv/kapacitor/load/templates/
- /srv/kapacitor/load/handlers/
- name: Konfig-Dateien erstellen
template:
src: "{{ item }}"
dest: "/srv/kapacitor/{{ item }}"
with_items:
- docker-compose.yml
- kapacitor.conf
notify: restart kapacitor docker
- name: Script-Dateien erstellen
template:
src: "{{ item }}"
dest: "/srv/kapacitor/{{ item }}"
mode: "o+rwx"
with_items:
- kapacitor_listtasks.sh
- kapacitor_show.sh
- kapacitor_watch.sh
- name: Kapacitor-Tasks erstellen
template:
src: "{{ item }}"
dest: "/srv/kapacitor/load/tasks/{{ item }}"
with_items:
- task_cpu_high.tick
- task_disk_low.tick
- task_load1_high.tick
- task_load5_high.tick
- task_load15_high.tick
- task_mem_used.tick
- task_notraffic_globe.tick
- task_notraffic_telekom.tick
notify: restart kapacitor docker
- name: start kapacitor docker
docker_service:
project_src: /srv/kapacitor/
state: present
version: "3"
services:
app:
image: kapacitor:1.5
restart: always
ports:
- 0.0.0.0:9092:9092
volumes:
- /srv/kapacitor/kapacitor.conf:/etc/kapacitor/kapacitor.conf:ro
- /srv/kapacitor/data/:/var/lib/kapacitor/
- /srv/kapacitor/load/:/etc/kapacitor/load
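Once the container is up, the .tick files under /srv/kapacitor/load/tasks/ should appear as defined tasks; a sketch run from /srv/kapacitor (task ids are assumed to match the file names, which is how the load service derives them):
# List the tasks Kapacitor loaded from /etc/kapacitor/load and inspect one
docker-compose exec app kapacitor list tasks
docker-compose exec app kapacitor show task_cpu_high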
# The hostname of this node.
# Must be resolvable by any configured InfluxDB hosts.
hostname = "10.5.0.111"
# Directory for storing a small amount of metadata about the server.
data_dir = "/var/lib/kapacitor"
# Do not apply configuration overrides during startup.
# Useful if the configuration overrides cause Kapacitor to fail startup.
# This option is intended as a safe guard and should not be needed in practice.
skip-config-overrides = true
# Default retention-policy, if a write is made to Kapacitor and
# it does not have a retention policy associated with it,
# then the retention policy will be set to this value
default-retention-policy = ""
[http]
# HTTP API Server for Kapacitor
# This server is always on,
# it serves both as a write endpoint
# and as the API endpoint for all other
# Kapacitor calls.
bind-address = ":9092"
log-enabled = true
write-tracing = false
pprof-enabled = false
https-enabled = false
[config-override]
# Enable/Disable the service for overriding configuration via the HTTP API.
enabled = true
[logging]
# Destination for logs
# Can be a path to a file or 'STDOUT', 'STDERR'.
file = "STDOUT"
# Logging level can be one of:
# DEBUG, INFO, ERROR
# HTTP logging can be disabled in the [http] config section.
level = "INFO"
[load]
# Enable/Disable the service for loading tasks/templates/handlers
# from a directory
enabled = true
# Directory where task/template/handler files are set
dir = "/etc/kapacitor/load"
[replay]
# Where to store replay files, aka recordings.
dir = "/var/lib/kapacitor/replay"
[task]
# Where to store the tasks database
# DEPRECATED: This option is not needed for new installations.
# It is only used to determine the location of the task.db file
# for migrating to the new `storage` service.
dir = "/var/lib/kapacitor/tasks"
# How often to snapshot running task state.
snapshot-interval = "60s"
[storage]
# Where to store the Kapacitor boltdb database
boltdb = "/var/lib/kapacitor/kapacitor.db"
[deadman]
# Configure a deadman's switch
# Globally configure deadman's switches on all tasks.
# NOTE: for this to be of use you must also globally configure at least one alerting method.
global = false
# Threshold, if globally configured the alert will be triggered if the throughput in points/interval is <= threshold.
threshold = 5.0
# Interval, if globally configured the frequency at which to check the throughput.
interval = "10s"
{% raw %}
# Id -- the alert Id, NODE_NAME will be replaced with the name of the node being monitored.
id = "node 'NODE_NAME' in task '{{ .TaskName }}'"
# The message of the alert. INTERVAL will be replaced by the interval.
message = "{{ .ID }} is {{ if eq .Level \"OK\" }}alive{{ else }}dead{{ end }}: {{ index .Fields \"collected\" | printf \"%0.3f\" }} points/INTERVAL."
{% endraw %}
# Multiple InfluxDB configurations can be defined.
# Exactly one must be marked as the default.
# Each one will be given a name and can be referenced in batch queries and InfluxDBOut nodes.
[[influxdb]]
# Connect to an InfluxDB cluster
# Kapacitor can subscribe, query and write to this cluster.
# Using InfluxDB is not required and can be disabled.
enabled = true
default = true
name = "sysmon"
urls = [ "{{ influxdb_sysmon.url }}" ]
username = "admin"
password = "{{ influx_admin_pw }}"
timeout = 0
# Do not verify the TLS/SSL certificate.
# This is insecure.
insecure-skip-verify = false
# Maximum time to try and connect to InfluxDB during startup
startup-timeout = "5m"
# Turn off all subscriptions
disable-subscriptions = false
# Subscription mode is either "cluster" or "server"
subscription-mode = "server"
# Which protocol to use for subscriptions
# one of 'udp', 'http', or 'https'.
subscription-protocol = "http"
# Subscriptions resync time interval
# Useful if you want to subscribe to newly created databases
# without restarting Kapacitor
subscriptions-sync-interval = "1m0s"
# Override the global hostname option for this InfluxDB cluster.
# Useful if the InfluxDB cluster is in a separate network and
# needs special config to connect back to this Kapacitor instance.
# Defaults to `hostname` if empty.
kapacitor-hostname = ""
# Override the global http port option for this InfluxDB cluster.
# Useful if the InfluxDB cluster is in a separate network and
# needs special config to connect back to this Kapacitor instance.
# Defaults to the port from `[http] bind-address` if 0.
http-port = 0
# Host part of a bind address for UDP listeners.
# For example if a UDP listener is using port 1234
# and `udp-bind = "hostname_or_ip"`,
# then the UDP port will be bound to `hostname_or_ip:1234`
# The default empty value will bind to all addresses.
udp-bind = ""
# Subscriptions use the UDP network protocol.
# The following options are for the UDP listeners created for each subscription.
# Number of packets to buffer when reading packets off the socket.
udp-buffer = 1000
# The size in bytes of the OS read buffer for the UDP socket.
# A value of 0 indicates use the OS default.
udp-read-buffer = 0
[influxdb.subscriptions]
# Set of databases and retention policies to subscribe to.
# If empty will subscribe to all, minus the list in
# influxdb.excluded-subscriptions
#
# Format
# db_name = <list of retention policies>
#
# Example:
# my_database = [ "default", "longterm" ]
[influxdb.excluded-subscriptions]
# Set of databases and retention policies to exclude from the subscriptions.
# If influxdb.subscriptions is empty it will subscribe to all
# except databases listed here.
#
# Format
# db_name = <list of retention policies>
#
# Example:
# my_database = [ "default", "longterm" ]
[alerta]
# Configure Alerta.
enabled = true
# The Alerta URL.
url = "https://alerta.warpzone.ms/api"
# Default authentication token.
token = "{{ alerta_token }}"
# Default token prefix
# If you are on older versions of alerta you may need to change this to "Key"
token-prefix = "Key"
# Default environment.
environment = "warpzone"
# Default origin.
origin = "Kapacitor"
[udf]
# Configuration for UDFs (User Defined Functions)
[udf.functions]
# Example go UDF.
# First compile example:
# go build -o avg_udf ./udf/agent/examples/moving_avg.go
#
# Use in TICKscript like:
# stream.goavg()
# .field('value')
# .size(10)
# .as('m_average')
#
# uncomment to enable
#[udf.functions.goavg]
# prog = "./avg_udf"
# args = []
# timeout = "10s"
# Example python UDF.
# Use in TICKscript like:
# stream.pyavg()
# .field('value')
# .size(10)
# .as('m_average')
#
# uncomment to enable
#[udf.functions.pyavg]
# prog = "/usr/bin/python2"
# args = ["-u", "./udf/agent/examples/moving_avg.py"]
# timeout = "10s"
# [udf.functions.pyavg.env]
# PYTHONPATH = "./udf/agent/py"
# Example UDF over a socket
#[udf.functions.myCustomUDF]
# socket = "/path/to/socket"
# timeout = "10s"
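Before relying on the [alerta] section above, the token can be verified directly against the Alerta API; a sketch, assuming get_secret leaves the token in /srv/kapacitor/alerta_token:
# A 200 status code means the token is accepted
curl -s -o /dev/null -w "%{http_code}\n" -H "Authorization: Key $(cat /srv/kapacitor/alerta_token)" https://alerta.warpzone.ms/api/alerts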
#!/bin/sh
# Show Kapacitor Tasks
# Usage: ./kapacitor_listtasks.sh
docker-compose exec app kapacitor list tasks
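The kapacitor_show.sh and kapacitor_watch.sh templates referenced in the tasks above are not included in this excerpt; a plausible sketch along the same lines (taking the task id as the first argument is an assumption):
# Show the definition and status of a single task, e.g. task_cpu_high
docker-compose exec app kapacitor show "$1"
# Stream the logs associated with a task
docker-compose exec app kapacitor watch "$1"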