Commit 4ad57aa9 authored by Christian Elberfeld

cleanup old services

parent 8422401c
@@ -52,21 +52,16 @@ alert:
  warn: 2
  crit: 4
  containers:
    - { name: "dockerstats_app_1" }
    - { name: "influx_sysmon_1" }
    - { name: "dockerstats_app_1" }
    - { name: "grafana_app_1" }
    - { name: "unifi_app_1" }
    - { name: "l4z0r_db_1" }
    - { name: "ldap_openldap_1" }
    - { name: "ldap_phpldapadmin_1" }
    - { name: "ldap_syncreplexporter_1" }
    - { name: "matestatdb_db_1" }
    - { name: "l4z0r_db_1" }
    - { name: "warpinfratest_app_1" }
    - { name: "warpinfratest_db_1" }
    - { name: "mqtt-service" }
    - { name: "nodered-app" }
    - { name: "prometheus-alert" }
    - { name: "prometheus-statsd-exporter" }
    - { name: "prometheus-snmp-exporter" }
    - { name: "prometheus-blackbox-exporter" }
    - { name: "mqtt-service" }
    - { name: "unifi_app_1" }
    - { name: "warpinfra-db" }
    - { name: "warpinfra-app" }
  disks:
@@ -38,12 +38,10 @@
- { role: common/docker_ldap, tags: ldap }
- { role: common/nginx, tags: nginx }
- { role: warpsrvint/docker_grafana, tags: grafana }
- { role: warpsrvint/docker_influx, tags: influx }
- { role: warpsrvint/docker_l4z0r, tags: l4z0r }
- { role: warpsrvint/docker_matestatdb, tags: matestatdb }
- { role: warpsrvint/docker_mqtt, tags: mqtt }
- { role: warpsrvint/docker_nodered, tags: nodered }
- { role: warpsrvint/docker_prometheus, tags: prometheus }
- { role: warpsrvint/docker_unifi, tags: unifi }
- { role: warpsrvint/docker_warpinfra, tags: warpinfra }
---
- include_tasks: ../functions/get_secret.yml
  with_items:
    - { path: /srv/influx/influx_admin_pw, length: 24 }
- name: create folder struct for influx
  file:
    path: "{{ item }}"
    state: "directory"
  with_items:
    - /srv/influx/
    - /srv/influx/sysmon/
- name: create config files
  template:
    src: "{{ item }}"
    dest: "/srv/influx/{{ item }}"
  with_items:
    - docker-compose.yml
- name: create script files
  template:
    src: "{{ item }}"
    dest: "/srv/influx/{{ item }}"
    mode: "o+rwx"
  with_items:
    - influx_sysmon.sh
- name: start influx docker
  docker_service:
    project_src: /srv/influx/
    state: present
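A quick way to verify the result of this role on the target host; a minimal sketch, assuming the compose project under /srv/influx is up and the container is named influx_sysmon_1 as in the monitoring list above:

docker ps --filter name=influx_sysmon_1 --format '{{.Names}}\t{{.Status}}'
curl -s -o /dev/null -w '%{http_code}\n' http://localhost:18086/ping   # InfluxDB answers 204 when healthy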
version: "3"
services:
  sysmon:
    image: influxdb:1.7.9
    restart: always
    ports:
      - 0.0.0.0:18086:8086
    volumes:
      - /srv/influx/sysmon/:/var/lib/influxdb
    environment:
      INFLUXDB_DB: "{{influxdb_sysmon.db}}"
      INFLUXDB_ADMIN_USER: "admin"
      INFLUXDB_ADMIN_PASSWORD: "{{ influx_admin_pw }}"
      INFLUXDB_USER: "{{influxdb_sysmon.user}}"
      INFLUXDB_USER_PASSWORD: "{{influxdb_sysmon.password}}"
      INFLUXDB_HTTP_AUTH_ENABLED: "true"
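Because INFLUXDB_HTTP_AUTH_ENABLED is set, the HTTP API published on port 18086 requires credentials. A hedged example of querying it, with a placeholder instead of the generated admin secret:

curl -sG 'http://localhost:18086/query' -u admin:ADMIN_PASSWORD \
  --data-urlencode 'q=SHOW DATABASES'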
#!/bin/sh
# Influx admin console
# Usage: ./influx_sysmon.sh
docker-compose exec sysmon influx -database "{{influxdb_sysmon.db}}" -password "{{ influx_admin_pw }}" -username "admin"
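The script drops into the interactive influx shell inside the sysmon container. A hypothetical session (the commands are standard InfluxQL; output omitted):

cd /srv/influx
./influx_sysmon.sh
> SHOW DATABASES
> SHOW USERS
> SHOW MEASUREMENTS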
---
- name: create folder struct for prometheus
  file:
    path: "{{ item }}"
    state: "directory"
  with_items:
    - /srv/prometheus/
    - /srv/prometheus/config/
    - /srv/prometheus/data/
    - /srv/prometheus/alert-data/
- name: create config files
  template: src={{ item }} dest=/srv/prometheus/config/{{ item }}
  with_items:
    - alertmanager.yml
    - prometheus.yml
    - prometheus.rules
  register: config
- name: start prometheus blackbox-exporter docker
  docker_container:
    name: prometheus-blackbox-exporter
    image: prom/blackbox-exporter:v0.5.0
    state: started
    restart_policy: always
    ports:
      - 0.0.0.0:9115:9115
- name: start prometheus snmp-exporter docker
  docker_container:
    name: prometheus-snmp-exporter
    image: prom/snmp-exporter:v0.4.0
    state: started
    restart_policy: always
    ports:
      - 0.0.0.0:9116:9116
- name: start prometheus statsd-exporter docker
  docker_container:
    name: prometheus-statsd-exporter
    image: prom/statsd-exporter:v0.4.0
    state: started
    restart_policy: always
    ports:
      - 0.0.0.0:9102:9102
      - 0.0.0.0:9125:9125/udp
- name: stop prometheus-alertmanager docker
  docker_container:
    name: prometheus-alert
    state: absent
  when: config.changed
- name: start prometheus-alertmanager docker
  docker_container:
    name: prometheus-alert
    image: prom/alertmanager:v0.7.1
    state: started
    restart_policy: always
    volumes:
      - /srv/prometheus/config/alertmanager.yml/:/etc/alertmanager/config.yml
      - /srv/prometheus/alert-data/:/alertmanager
    ports:
      - 0.0.0.0:9093:9093
- name: stop prometheus docker
  docker_container:
    name: prometheus-app
    state: absent
  when: config.changed
- name: start prometheus docker
  docker_container:
    name: prometheus-app
    image: prom/prometheus:v1.7.1
    state: started
    restart_policy: always
    volumes:
      - /srv/prometheus/config/prometheus.yml/:/etc/prometheus/prometheus.yml
      - /srv/prometheus/config/prometheus.rules/:/etc/prometheus/prometheus.rules
      - /srv/prometheus/data/:/prometheus
    ports:
      - 0.0.0.0:9090:9090
    links:
      - prometheus-blackbox-exporter:blackbox-exporter
      - prometheus-snmp-exporter:snmp-exporter
      - prometheus-statsd-exporter:statsd-exporter
      - prometheus-alert:alertmanager
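A minimal sketch for checking that the exporters and Prometheus itself came up after this role ran, assuming the host ports published above:

docker ps --filter name=prometheus- --format '{{.Names}}\t{{.Status}}'
curl -s http://localhost:9090/metrics | head -n 3    # Prometheus is serving
curl -s http://localhost:9115/metrics | head -n 3    # blackbox-exporter is serving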
global:
  # The smarthost and SMTP sender used for mail notifications.
  smtp_smarthost: '{{ smtp_host }}:{{ smtp_port }}'
  smtp_from: '{{ noreply_email_user }}'
  # smtp_auth_username: 'alertmanager'
  # smtp_auth_password: 'password'

# The root route on which each incoming alert enters.
route:
  # The labels by which incoming alerts are grouped together. For example,
  # multiple alerts coming in for cluster=A and alertname=LatencyHigh would
  # be batched into a single group.
  group_by: ['alertname', 'cluster', 'service']
  # When a new group of alerts is created by an incoming alert, wait at
  # least 'group_wait' to send the initial notification.
  # This ensures that alerts for the same group that start firing shortly
  # after one another are batched together in the first notification.
  group_wait: 30s
  # After the first notification has been sent, wait 'group_interval' to send
  # a batch of new alerts that started firing for that group.
  group_interval: 5m
  # If an alert has successfully been sent, wait 'repeat_interval' before
  # resending it.
  repeat_interval: 3h
  # A default receiver
  receiver: mail

# Inhibition rules allow muting a set of alerts while another alert is firing.
# We use this to mute any warning-level notifications if the same alert is
# already critical.
inhibit_rules:
  - source_match:
      severity: 'critical'
    target_match:
      severity: 'warning'
    # Apply inhibition if the alertname is the same.
    equal: ['alertname', 'cluster', 'service']

receivers:
  - name: 'mail'
    email_configs:
      - to: 'void@members.warpzone.ms'
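To see the routing and inhibition behaviour end to end, a synthetic alert can be pushed at the Alertmanager; a hedged sketch, assuming this Alertmanager version accepts the v1 API on the published port 9093:

curl -s -XPOST http://localhost:9093/api/v1/alerts \
  -H 'Content-Type: application/json' \
  -d '[{"labels": {"alertname": "TestAlert", "severity": "warning", "service": "demo"}}]'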
ALERT MEM_FULL_99P
  IF ((node_memory_MemTotal - node_memory_MemFree) / node_memory_MemTotal * 100) > 99
  FOR 15m

ALERT HIGH_CPU
  IF rate(node_cpu{mode = "idle"}[5m]) < 0.1
  FOR 15m

ALERT HIGH_DISK_IO
  IF node_disk_io_now > 10
  FOR 5m

ALERT DISK_FULL_95P
  IF (100 * (1 - (node_filesystem_free / node_filesystem_size))) > 95

ALERT DISK_FULL_4H
  IF predict_linear(node_filesystem_free[1h], 4*3600) < 0
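These rules use the Prometheus 1.x rule syntax (ALERT/IF/FOR). Any of the expressions can be tried ad hoc against the query API before changing a rule; a sketch, assuming Prometheus is reachable on the published port 9090:

curl -sG 'http://localhost:9090/api/v1/query' \
  --data-urlencode 'query=(node_memory_MemTotal - node_memory_MemFree) / node_memory_MemTotal * 100'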
# my global config
global:
  scrape_interval: 15s      # Set the scrape interval to every 15 seconds. Default is every 1 minute.
  evaluation_interval: 15s  # Evaluate rules every 15 seconds. The default is every 1 minute.
  # scrape_timeout is set to the global default (10s).

  # Attach these labels to any time series or alerts when communicating with
  # external systems (federation, remote storage, Alertmanager).
  external_labels:
    monitor: 'prometheus'

# Load rules once and periodically evaluate them according to the global 'evaluation_interval'.
rule_files:
  - "/etc/prometheus/prometheus.rules"

alerting:
  alertmanagers:
    - scheme: http
      static_configs:
        - targets:
            - "alertmanager:9093"

# A scrape configuration containing exactly one endpoint to scrape:
# Here it's Prometheus itself.
scrape_configs:
  # The job name is added as a label `job=<job_name>` to any timeseries scraped from this config.
  - job_name: 'prometheus'
    static_configs:
      - targets: ['localhost:9090']
        labels:
          group: 'service'

  - job_name: 'node'
    static_configs:
      - targets: ['warpsrvint:9100']
        labels:
          group: 'server'

  - job_name: 'snmp'
    metrics_path: /snmp
    params:
      module: [default]
    static_configs:
      - targets:
          - warpfire
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: snmp-exporter:9116

  - job_name: 'ping'
    metrics_path: /probe
    params:
      module: [icmp]
    static_configs:
      - targets:
          - 212.124.34.241        # Next Hop Globe
          - 2001:470:1f0a:a3b::1  # HE Tunnel Endpoint
          - 8.8.8.8               # Google DNS
          - 217.79.181.126        # Server MyLoc IPv4
          - 2001:4ba0:ffff:7c::1  # Server MyLoc IPv6
          - 10.5.0.1              # warpfire
          - 192.168.0.100         # Switch HP
          - 192.168.0.101         # Switch Brocade
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-exporter:9115

  - job_name: 'http'
    metrics_path: /probe
    params:
      module: [http_2xx]
    static_configs:
      - targets:
          - https://warpzone.ms
          - https://gitlab.warpzone.ms
          - https://infra.warpzone.ms
          - https://mattermost.warpzone.ms
          - https://pad.warpzone.ms
          - https://wiki.warpzone.ms
    relabel_configs:
      - source_labels: [__address__]
        target_label: __param_target
      - source_labels: [__param_target]
        target_label: instance
      - target_label: __address__
        replacement: blackbox-exporter:9115
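The relabel_configs above turn each listed target into a ?target= parameter and redirect the scrape to the exporter. What Prometheus effectively requests can be reproduced by hand; a sketch, assuming the exporter ports published above:

curl -s 'http://localhost:9115/probe?module=icmp&target=8.8.8.8' | grep probe_success
curl -s 'http://localhost:9116/snmp?module=default&target=warpfire' | head -n 5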
location /static {
    alias /tmp/warpinfratest/static;  # your Django project's static files - amend as required
}

location / {
    uwsgi_pass unix:///tmp/warpinfratest/warpinfra.sock;
    include /etc/nginx/uwsgi_params;  # the uwsgi_params file you installed
}
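A hedged smoke test for this vhost fragment; nginx -t validates the syntax, and the static path below is a hypothetical asset, not a file from the repo:

nginx -t
curl -I http://localhost/static/example.css   # hypothetical file under /tmp/warpinfratest/static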