Commit 71edebeb authored by Christian Elberfeld

Merge remote-tracking branch 'remotes/origin/master' into vorstand-vm

# Conflicts:
#	all/common/tasks/main.yml
#	host_vars/vorstandspi
#	hosts
#	site.yml
#	vorstandspi/jameica/tasks/main.yml
#	vorstandspi/mysql/tasks/main.yml
parents d8d06833 62ada488
2 merge requests: !10 Update Branch from Master, !9 Umbau Vorstands-Pi zu Vorstands-VM (converting the Vorstands-Pi to a Vorstands-VM)
Showing changes with 509 additions and 150 deletions
@@ -4,11 +4,11 @@
 - name: create motd file
   template: src=motd.j2 dest=/etc/motd

 - name: install apt-transport-https packages before adding sources
   apt:
     pkg: "{{ item }}"
     update_cache: yes
-    state: installed
+    state: present
   with_items:
     - apt-transport-https
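The only change in this hunk is the apt module's deprecated `state: installed` becoming `state: present`, a substitution repeated throughout the commit. As a side note, newer Ansible releases also accept the package list directly in `pkg`, which avoids the `with_items` loop entirely; a minimal sketch of that idiom (same package, newer Ansible assumed):

- name: install apt-transport-https packages before adding sources
  apt:
    pkg:
      - apt-transport-https
    update_cache: yes
    state: present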
@@ -26,29 +26,28 @@
   with_items: "{{ debian_keys_url }}"
   when: debian_keys_url

 - name: set debian repos
   template: src=sources.list.j2 dest=/etc/apt/sources.list

 - name: install common packages
   apt:
     pkg: "{{ item }}"
     update_cache: yes
-    state: installed
+    state: present
   with_items:
     - vim
     - wget
     - psmisc
     - tree

 - name: deploy sshd config
   template: src=sshd_config.j2 dest=/etc/ssh/sshd_config
   notify: restart sshd

 - name: add sshkeys for server
   authorized_key:
     user: root
     key: "{{ lookup('file', '../keyfiles/' + item + '.pub') }}"
   with_items: "{{ administratorenteam }}"
   when: administratorenteam
   notify: restart sshd
@@ -4,26 +4,23 @@
   apt:
     pkg: "{{ item }}"
     update_cache: yes
-    state: installed
+    state: present
   with_items:
     - borgbackup
     - logrotate
     - moreutils
     - openssl

-- name: create directories 1
+- name: create directory
   file:
-    path: "{{ item }}"
+    path: "/srv/borgbackup/"
     state: "directory"
-  with_items:
-    - /srv/borgbackup
-    - /var/log/borgbackup

-- name: create directories 2
+- name: create directories
   file:
-    path: "/srv/borgbackup/{{ item.key }}"
+    path: "/srv/borgbackup/{{ item }}"
     state: "directory"
-  with_dict: "{{ borgbackup_repos }}"
+  with_items: "{{ borgbackup_repos }}"

 - name: generate new repo_passphrase (check)
   stat:
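The loop above now iterates a plain list, and every template below switches from `{{ item.key }}`/`{{ item.value.* }}` to a `{% for repo_url in borgbackup_repos %}` loop: `borgbackup_repos` changes shape from a dict of per-repo settings to a flat list of repository URLs, with compression hard-coded to `lzma,2` and prune rules moved into a shared variable. The new borg commands also pass `--remote-path borg1`, i.e. the name of the borg binary to invoke on the remote side. An illustrative before/after of the variable, with all URLs and values hypothetical:

# before: one dict entry per repo, each carrying its own settings
borgbackup_repos:
  example:
    repo: "ssh://backup@repo.example.org/./example"
    compression: "lzma,2"
    options: "--remote-path borg1"
    prune: "--keep-daily=7 --keep-weekly=4"
    weekday: "*"
    hour: "2"
    minute: "0"

# after: a flat list of repo URLs; prune rules live in a shared list
borgbackup_repos:
  - "ssh://backup@repo.example.org/./example"
borgbackup_prune:
  - "--keep-daily=7"
  - "--keep-weekly=4"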
@@ -56,61 +53,23 @@
 # BorgBackup Scripte erstellen
-- name: BorgBackup Scripte erstellen (check)
-  template:
-    src: borgbackup-check.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-check.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (create)
-  template:
-    src: borgbackup-create.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-create.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (delete)
-  template:
-    src: borgbackup-delete.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-delete.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (init)
-  template:
-    src: borgbackup-init.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-init.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (info)
-  template:
-    src: borgbackup-info.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-info.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (list)
-  template:
-    src: borgbackup-list.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-list.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (mount)
-  template:
-    src: borgbackup-mount.sh
-    dest: /srv/borgbackup/{{ item.key }}/borgbackup-mount.sh
-    mode: o+x
-  with_dict: "{{ borgbackup_repos }}"
-
-- name: BorgBackup Scripte erstellen (prometheus)
-  template:
-    src: borgbackup-prometheus.sh
-    dest: /srv/borgbackup/borgbackup-prometheus.sh
-    mode: o+x
+- name: BorgBackup Scripte erstellen
+  template: src={{ item }} dest=/srv/borgbackup/{{ item }} mode=o+x
+  with_items:
+    - "borgbackup-check.sh"
+    - "borgbackup-create.sh"
+    - "borgbackup-delete.sh"
+    - "borgbackup-info.sh"
+    - "borgbackup-init.sh"
+    - "borgbackup-list.sh"
+    - "borgbackup-mount.sh"
+    - "borgbackup-prometheus.sh"
+
+- name: BorgBackup log folder erstellen
+  file:
+    path: "/var/log/borgbackup"
+    state: "directory"

 - name: BorgBackup LogRotate config erstellen
   template:
     src: logrotate
@@ -122,9 +81,8 @@
     state: absent

 - name: Cronjob für BorgBackup Backup
-  cron: name="borgbackup-{{ item.key }}" weekday="{{ item.value.weekday }}" hour="{{ item.value.hour }}" minute="{{ item.value.minute }}" job="/srv/borgbackup/{{ item.key }}/borgbackup-create.sh 2>&1 | ts '[\\%Y-\\%m-\\%d \\%H:\\%M:\\%S]' >> /var/log/borgbackup/{{ item.key }}.log"
-  with_dict: "{{ borgbackup_repos }}"
+  cron: name="borgbackup-create" weekday="{{borgbackup_weekday}}" hour="{{borgbackup_hour}}" minute="{{borgbackup_minute}}" job="/srv/borgbackup/borgbackup-create.sh 2>&1 | ts '[\\%Y-\\%m-\\%d \\%H:\\%M:\\%S]' >> /var/log/borgbackup/borgbackup.log"

 - name: Cronjob für BorgBackup Prometheus export
-  cron: name="borgbackup-prom" weekday="*" hour="*" minute="40" job="/srv/borgbackup/borgbackup-prometheus.sh 2>&1 | ts '[\\%Y-\\%m-\\%d \\%H:\\%M:\\%S]' >> /var/log/borgbackup/borgbackup-prometheus.log"
+  cron: name="borgbackup-prom" weekday="*" hour="*" minute="0" job="/srv/borgbackup/borgbackup-prometheus.sh 2>&1 | ts '[\\%Y-\\%m-\\%d \\%H:\\%M:\\%S]' >> /var/log/borgbackup/borgbackup.log"
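With the dict gone, the per-repo cron jobs collapse into a single `borgbackup-create` job whose schedule comes from shared variables; `ts` (from the moreutils package installed above) prefixes every log line with a timestamp. A minimal sketch of the variables the new cron task expects, with hypothetical values:

# illustrative shared schedule for the single borgbackup-create cron job
borgbackup_weekday: "*"
borgbackup_hour: "2"
borgbackup_minute: "30"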
@@ -5,6 +5,10 @@
 export BORG_PASSPHRASE="{{repo_passphrase}}"
 export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"

-echo "===[ Check Repo: {{ item.value.repo }} ]==="
-borg check $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }}
+{% for repo_url in borgbackup_repos %}
+echo "===[ Check Repo: {{repo_url}} ]============================================================"
+borg check $1 $2 $3 --info --show-rc --remote-path borg1 {{repo_url}}
+{% endfor %}
\ No newline at end of file
@@ -8,24 +8,20 @@ export BACKUP_DATE=`date +%Y-%m-%d_%H_%M`
 # anschließend Bereinigung
 # abschließend Integritätscheck

-echo "===[ Create Backup: {{ item.value.repo }} ]===" \
-&& \
-borg create $1 $2 $3 --info --show-rc --stats --compression {{ item.value.compression }} {{ item.value.options }} {{ item.value.repo }}::$BACKUP_DATE \
+{% for repo_url in borgbackup_repos %}
+
+echo "===[ Create Backup: {{repo_url}} ]============================================================"
+borg create $1 $2 $3 --info --show-rc --remote-path borg1 --stats --compression lzma,2 {{repo_url}}::$BACKUP_DATE \
 {% for directory in borgbackup_directories %}
 {{ directory }} \
-{% endfor %} \
-{% if item.value.directories is defined %}
-{% for directory in item.value.directories %}
-{{ directory }} \
-{% endfor %} \
-{% endif %}
-&& \
-echo "===[ Prune old Backups: {{ item.value.repo }} ]===" \
+{% endfor %}
 && \
-borg prune $1 $2 $3 --info --show-rc --list {{ item.value.prune }} {{ item.value.options }} {{ item.value.repo }} \
+borg prune $1 $2 $3 --info --show-rc --list {{repo_url}} \
+{% for prune in borgbackup_prune %}
+{{ prune }} \
+{% endfor %}
 && \
-echo "===[ Check Repo: {{ item.value.repo }} ]===" \
-&& \
-borg check $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }} \
-&& \
-date > "/srv/borgbackup/{{ item.key }}/lastbackup"
+borg check $1 $2 $3 --info --show-rc {{repo_url}}
+
+{% endfor %}
@@ -5,11 +5,20 @@ export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"
 # Löschen eines Backups

+echo "Available Repos: "
+{% for repo_url in borgbackup_repos %}
+echo "{{repo_url}}"
+{% endfor %}
+
+echo "RepoName, followed by [ENTER]:"
+read repo_url
+
 echo "Available Backups: "
-borg list $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }}
+borg list $1 $2 $3 --info --show-rc --remote-path borg1 $repo_url

 echo "BackupName, followed by [ENTER]:"
 read target

-borg delete $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }}::$target
+borg delete $1 $2 $3 --info --show-rc --remote-path borg1 $repo_url::$target
@@ -5,11 +5,12 @@ export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"
 # Anzeige des Inhaltes in den Borg Backup Archiven

-echo "============================================="
-echo "Backups Repo Info: {{ item.key }} "
-BACKUPS=$(borg list $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }})
-echo "============================================="
-echo "Backups List "
+{% for repo_url in borgbackup_repos %}
+
+BACKUPS=$(borg list $1 $2 $3 --info --show-rc --remote-path borg1 {{repo_url}})
 echo "$BACKUPS"

 BACKUPS_LIST=$(echo "$BACKUPS" | awk '{print $1}')
@@ -18,10 +19,11 @@ for BACKUP in $BACKUPS_LIST; do
   echo "============================================="
-  borg info $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }}::$BACKUP
+  borg info $1 $2 $3 --info --show-rc --remote-path borg1 {{repo_url}}::$BACKUP
 done

 echo "============================================="
+{% endfor %}
@@ -6,15 +6,18 @@
 export BORG_PASSPHRASE="{{repo_passphrase}}"
 export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"

-if [ ! -e "/srv/borgbackup/{{ item.key }}/initialized" ]; then
-  echo "Initialize Repo: {{ item.key }}"
-  date > "/srv/borgbackup/{{ item.key }}/initialized"
-  borg init $1 $2 $3 --info --show-rc --encryption=repokey {{ item.value.options }} {{ item.value.repo }}
+{% for repo_url in borgbackup_repos %}
+if [ ! -e "{{repo_url}}.initialized" ]; then
+  echo "Initialize Repo: {{repo_url}}"
+  date > "{{repo_url}}.initialized"
+  borg init $1 $2 $3 --info --show-rc --remote-path borg1 --encryption=repokey {{repo_url}}
 else
-  echo "Repo already initialized: {{ item.key }}"
+  echo "Repo already initialized: {{repo_url}}"
 fi
+{% endfor %}
@@ -5,7 +5,10 @@ export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"
 # Anzeige des Inhaltes in den Borg Backup Archiven

-echo "===[ List Repo: {{ item.key }} ]==="
-borg list $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }}
+{% for repo_url in borgbackup_repos %}
+
+echo "===[ List Repo: {{repo_url}} ]============================================================"
+borg list $1 $2 $3 --info --show-rc --remote-path borg1 {{repo_url}}
+{% endfor %}
@@ -5,8 +5,17 @@ export BORG_RSH="ssh -i /srv/borgbackup/repo_sshkey"
 # Einhängen der Repos in /mnt/

-echo "Directory to mount, followed by [ENTER]:"
-read target
-borg mount $1 $2 $3 --info --show-rc {{ item.value.options }} {{ item.value.repo }} $target
+echo "Available Repos: "
+{% for repo_url in borgbackup_repos %}
+echo "{{repo_url}}"
+{% endfor %}
+
+echo "RepoName, followed by [ENTER]:"
+read repo_url
+
+echo -n 'Mounting to: /mnt'
+mkdir /mnt/
+borg mount $1 $2 $3 --info --show-rc --remote-path borg1 $repo_url /mnt/
@@ -28,14 +28,11 @@ function calc_bytes {
   esac
 }

-# create temp file
-TMP_FILE=$(mktemp)
-
-echo "borgbackup_repos_count {{ borgbackup_repos|length }}" > $TMP_FILE
+echo "borgbackup_repos_count {{borgbackup_repos|length}}" > $TMP_FILE

-{% for repo in borgbackup_repos %}
-BACKUPS=$(borg list {{ borgbackup_repos[repo].options }} {{ borgbackup_repos[repo].repo }})
+{% for repo_url in borgbackup_repos %}
+BACKUPS=$(borg list --remote-path borg1 {{repo_url}})
 BACKUPS_LIST=$(echo "$BACKUPS" | awk '{print $1}')

 COUNTER=0
@@ -45,12 +42,15 @@ for BACKUP in $BACKUPS_LIST; do
 done

-BORG_INFO=$(borg info {{ borgbackup_repos[repo].options }} {{ borgbackup_repos[repo].repo }}::$BACKUP)
-
-echo "borgbackup_count{repo="{{ repo }}"} $COUNTER" >> $TMP_FILE
-echo "borgbackup_files{repo="{{ repo }}"} $(echo "$BORG_INFO" | grep "Number of files" | awk '{print $4}')" >> $TMP_FILE
-echo "borgbackup_chunks_unique{repo="{{ repo }}"} $(echo "$BORG_INFO" | grep "Chunk index" | awk '{print $3}')" >> $TMP_FILE
-echo "borgbackup_chunks_total{repo="{{ repo }}"} $(echo "$BORG_INFO" | grep "Chunk index" | awk '{print $4}')" >> $TMP_FILE
+BORG_INFO=$(borg info --remote-path borg1 {{repo_url}}::$BACKUP)
+
+# create temp file
+TMP_FILE=$(mktemp)
+
+echo "borgbackup_count{repo="{{ repo_url }}"} $COUNTER" > $TMP_FILE
+echo "borgbackup_files{repo="{{ repo_url }}"} $(echo "$BORG_INFO" | grep "Number of files" | awk '{print $4}')" >> $TMP_FILE
+echo "borgbackup_chunks_unique{repo="{{ repo_url }}"} $(echo "$BORG_INFO" | grep "Chunk index" | awk '{print $3}')" >> $TMP_FILE
+echo "borgbackup_chunks_total{repo="{{ repo_url }}"} $(echo "$BORG_INFO" | grep "Chunk index" | awk '{print $4}')" >> $TMP_FILE

 # byte size calculation
 LAST_SIZE=$(calc_bytes $(echo "$BORG_INFO" |grep "This archive" |awk '{print $3}') $(echo "$BORG_INFO" |grep "This archive" |awk '{print $4}'))
@@ -60,12 +60,12 @@
 TOTAL_SIZE=$(calc_bytes $(echo "$BORG_INFO" |grep "All archives" |awk '{print $3}') $(echo "$BORG_INFO" |grep "All archives" |awk '{print $4}'))
 TOTAL_SIZE_COMPRESSED=$(calc_bytes $(echo "$BORG_INFO" |grep "All archives" |awk '{print $5}') $(echo "$BORG_INFO" |grep "All archives" |awk '{print $6}'))
 TOTAL_SIZE_DEDUP=$(calc_bytes $(echo "$BORG_INFO" |grep "All archives" |awk '{print $7}') $(echo "$BORG_INFO" |grep "All archives" |awk '{print $8}'))

-echo "borgbackup_last_size{repo="{{ repo }}"} $LAST_SIZE" >> $TMP_FILE
-echo "borgbackup_last_size_compressed{repo="{{ repo }}"} $LAST_SIZE_COMPRESSED" >> $TMP_FILE
-echo "borgbackup_last_size_dedup{repo="{{ repo }}"} $LAST_SIZE_DEDUP" >> $TMP_FILE
-echo "borgbackup_total_size{repo="{{ repo }}"} $TOTAL_SIZE" >> $TMP_FILE
-echo "borgbackup_total_size_compressed{repo="{{ repo }}"} $TOTAL_SIZE_COMPRESSED" >> $TMP_FILE
-echo "borgbackup_total_size_dedup{repo="{{ repo }}"} $TOTAL_SIZE_DEDUP" >> $TMP_FILE
+echo "borgbackup_last_size{repo="{{ repo_url }}"} $LAST_SIZE" >> $TMP_FILE
+echo "borgbackup_last_size_compressed{repo="{{ repo_url }}"} $LAST_SIZE_COMPRESSED" >> $TMP_FILE
+echo "borgbackup_last_size_dedup{repo="{{ repo_url }}"} $LAST_SIZE_DEDUP" >> $TMP_FILE
+echo "borgbackup_total_size{repo="{{ repo_url }}"} $TOTAL_SIZE" >> $TMP_FILE
+echo "borgbackup_total_size_compressed{repo="{{ repo_url }}"} $TOTAL_SIZE_COMPRESSED" >> $TMP_FILE
+echo "borgbackup_total_size_dedup{repo="{{ repo_url }}"} $TOTAL_SIZE_DEDUP" >> $TMP_FILE

 {% endfor %}
@@ -73,5 +73,5 @@
 mv $TMP_FILE $PROM_FILE
 chown prometheus:prometheus $PROM_FILE

-echo "created BorgBackup statistic for $COUNTER backups in {{ borgbackup_repos|length }} repos: $PROM_FILE"
+echo "created BorgBackup statistic for $COUNTER backups in $PROM_FILE"
@@ -4,7 +4,7 @@
   apt:
     pkg: "{{ item }}"
     update_cache: yes
-    state: installed
+    state: present
   with_items:
     - borgbackup
@@ -4,7 +4,7 @@
   apt:
     pkg: "{{ item }}"
     update_cache: yes
-    state: installed
+    state: present
   with_items:
     - docker-ce
     - python
 ---
-# Pakete installieren
-- name: pakete installieren
+- name: stop prometheus-node-exporter
+  service: name=prometheus-node-exporter state=stopped
+
+# Pakete deinstallieren
+- name: pakete deinstallieren
   apt:
     pkg: "{{ item }}"
-    update_cache: yes
-    state: installed
+    update_cache: no
+    state: absent
   with_items:
     - prometheus-node-exporter
-
-- name: reload systemd and enable service
-  systemd:
-    name: prometheus-node-exporter
-    enabled: yes
-    daemon_reload: yes
-
-- name: restart prometheus-node-exporter
-  service: name=prometheus-node-exporter state=restarted
+
+- name: restart telegraf
+  service: name=telegraf state=restarted
---
# Pakete installieren
- name: pakete installieren
  apt:
    pkg: "{{ item }}"
    update_cache: yes
    state: present
  with_items:
    - telegraf

- name: pakete installieren (host_type = physical)
  apt:
    pkg: "{{ item }}"
    update_cache: yes
    state: present
  with_items:
    - hddtemp
    - lm-sensors
    - smartmontools
  when: host_type == 'physical'

- name: create main config
  template: src={{ item }} dest=/etc/telegraf/{{ item }}
  with_items:
    - telegraf.conf
  notify: restart telegraf

- name: reload systemd and enable service
  command: systemctl enable telegraf
  notify: restart telegraf

- name: delete config file 1
  file:
    path: "/etc/telegraf/telegraf.d/outputs-influxdb.conf"
    state: absent
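The `reload systemd and enable service` task shells out to systemctl; a minimal sketch of the same step via the `systemd` module, mirroring the node-exporter task this commit removes (equivalent behaviour assumed):

- name: reload systemd and enable service
  systemd:
    name: telegraf
    enabled: yes
    daemon_reload: yes
  notify: restart telegraf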
# Global tags can be specified here in key="value" format.
[global_tags]
# dc = "us-east-1" # will tag all metrics with dc=us-east-1
# rack = "1a"
## Environment variables can be used as tags, and throughout the config file
# user = "$USER"
server = "true"
host_type = "{{host_type}}"
# Configuration for telegraf agent
[agent]
## Default data collection interval for all inputs
interval = "5s"
## Rounds collection interval to 'interval'
## ie, if interval="10s" then always collect on :00, :10, :20, etc.
round_interval = true
## Telegraf will send metrics to outputs in batches of at most
## metric_batch_size metrics.
## This controls the size of writes that Telegraf sends to output plugins.
metric_batch_size = 1000
## For failed writes, telegraf will cache metric_buffer_limit metrics for each
## output, and will flush this buffer on a successful write. Oldest metrics
## are dropped first when this buffer fills.
## This buffer only fills when writes fail to output plugin(s).
metric_buffer_limit = 10000
## Collection jitter is used to jitter the collection by a random amount.
## Each plugin will sleep for a random time within jitter before collecting.
## This can be used to avoid many plugins querying things like sysfs at the
## same time, which can have a measurable effect on the system.
collection_jitter = "0s"
## Default flushing interval for all outputs. You shouldn't set this below
## interval. Maximum flush_interval will be flush_interval + flush_jitter
flush_interval = "10s"
## Jitter the flush interval by a random amount. This is primarily to avoid
## large write spikes for users running a large number of telegraf instances.
## ie, a jitter of 5s and interval 10s means flushes will happen every 10-15s
flush_jitter = "0s"
## By default or when set to "0s", precision will be set to the same
## timestamp order as the collection interval, with the maximum being 1s.
## ie, when interval = "10s", precision will be "1s"
## when interval = "250ms", precision will be "1ms"
## Precision will NOT be used for service inputs. It is up to each individual
## service input to set the timestamp at the appropriate precision.
## Valid time units are "ns", "us" (or "µs"), "ms", "s".
precision = ""
## Logging configuration:
## Run telegraf with debug log messages.
debug = false
## Run telegraf in quiet mode (error log messages only).
quiet = false
## Specify the log file name. The empty string means to log to stderr.
logfile = ""
## Override default hostname, if empty use os.Hostname()
hostname = "{{ inventory_hostname }}"
## If set to true, do not set the "host" tag in the telegraf agent.
omit_hostname = false
###############################################################################
# OUTPUT PLUGINS #
###############################################################################
# Configuration for sending metrics to InfluxDB
[[outputs.influxdb]]
## The full HTTP or UDP URL for your InfluxDB instance.
##
urls = ["{{influxdb_sysmon.url}}"]
## The target database for metrics; will be created as needed.
database = "{{influxdb_sysmon.db}}"
## If true, no CREATE DATABASE queries will be sent. Set to true when using
## Telegraf with a user without permissions to create databases or when the
## database already exists.
skip_database_creation = true
## Name of existing retention policy to write to. Empty string writes to
## the default retention policy.
# retention_policy = ""
## Write consistency (clusters only), can be: "any", "one", "quorum", "all"
# write_consistency = "any"
## Timeout for HTTP messages.
# timeout = "5s"
## HTTP Basic Auth
username = "{{influxdb_sysmon.user}}"
password = "{{influxdb_sysmon.password}}"
## HTTP User-Agent
# user_agent = "telegraf"
## UDP payload size is the maximum packet size to send.
# udp_payload = 512
## Optional SSL Config
# ssl_ca = "/etc/telegraf/ca.pem"
# ssl_cert = "/etc/telegraf/cert.pem"
# ssl_key = "/etc/telegraf/key.pem"
## Use SSL but skip chain & host verification
# insecure_skip_verify = false
## HTTP Proxy override, if unset values the standard proxy environment
## variables are consulted to determine which proxy, if any, should be used.
# http_proxy = "http://corporate.proxy:3128"
## Additional HTTP headers
# http_headers = {"X-Special-Header" = "Special-Value"}
## HTTP Content-Encoding for write request body, can be set to "gzip" to
## compress body or "identity" to apply no encoding.
# content_encoding = "identity"
## When true, Telegraf will output unsigned integers as unsigned values,
## i.e.: "42u". You will need a version of InfluxDB supporting unsigned
## integer values. Enabling this option will result in field type errors if
## existing data has been written.
# influx_uint_support = false
###############################################################################
# INPUT PLUGINS #
###############################################################################
# Read metrics about system load & uptime
[[inputs.system]]
# no configuration
[[inputs.interrupts]]
# no configuration
[[inputs.linux_sysctl_fs]]
# no configuration
# Get kernel statistics from /proc/stat
[[inputs.kernel]]
# no configuration
# Read metrics about memory usage
[[inputs.mem]]
# no configuration
# Read metrics about swap memory usage
[[inputs.swap]]
# no configuration
# Get the number of processes and group them by status
[[inputs.processes]]
# no configuration
# Read metrics about cpu usage
[[inputs.cpu]]
## Whether to report per-cpu stats or not
percpu = true
## Whether to report total system cpu stats or not
totalcpu = true
## If true, collect raw CPU time metrics.
collect_cpu_time = false
## If true, compute and report the sum of all non-idle CPU states.
report_active = false
# Read metrics about disk usage by mount point
[[inputs.disk]]
## By default stats will be gathered for all mount points.
## Set mount_points will restrict the stats to only the specified mount points.
mount_points = ["/", "/srv"]
## Ignore mount points by filesystem type.
ignore_fs = ["tmpfs", "devtmpfs", "devfs", "sysfs", "overlay" ]
# Read metrics about disk IO by device
[[inputs.diskio]]
## By default, telegraf will gather stats for all devices including
## disk partitions.
## Setting devices will restrict the stats to the specified devices.
# devices = ["sda", "sdb"]
## Uncomment the following line if you need disk serial numbers.
# skip_serial_number = false
#
## On systems which support it, device metadata can be added in the form of
## tags.
## Currently only Linux is supported via udev properties. You can view
## available properties for a device by running:
## 'udevadm info -q property -n /dev/sda'
# device_tags = ["ID_FS_TYPE", "ID_FS_USAGE"]
#
## Using the same metadata source as device_tags, you can also customize the
## name of the device via templates.
## The 'name_templates' parameter is a list of templates to try and apply to
## the device. The template may contain variables in the form of '$PROPERTY' or
## '${PROPERTY}'. The first template which does not contain any variables not
## present for the device is used as the device name tag.
## The typical use case is for LVM volumes, to get the VG/LV name instead of
## the near-meaningless DM-0 name.
# name_templates = ["$ID_FS_LABEL","$DM_VG_NAME/$DM_LV_NAME"]
# Read metrics about network interface usage
[[inputs.net]]
## By default, telegraf gathers stats from any up interface (excluding loopback)
## Setting interfaces will tell it to gather these explicit interfaces,
## regardless of status.
##
interfaces = ["eth*", "en*"]
##
## On linux systems telegraf also collects protocol stats.
## Setting ignore_protocol_stats to true will skip reporting of protocol metrics.
##
# ignore_protocol_stats = false
##
# Read metrics about network usage
[[inputs.netstat]]
# no configuration
# Collects conntrack stats from the configured directories and files.
[[inputs.conntrack]]
## The following defaults would work with multiple versions of conntrack.
## Note the nf_ and ip_ filename prefixes are mutually exclusive across
## kernel versions, as are the directory locations.
## Superset of filenames to look for within the conntrack dirs.
## Missing files will be ignored.
files = ["ip_conntrack_count","ip_conntrack_max",
"nf_conntrack_count","nf_conntrack_max"]
## Directories to search within for the conntrack files above.
## Missing directories will be ignored.
dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
{% if host_type == 'physical' %}
# Monitor disks' temperatures using hddtemp
[[inputs.hddtemp]]
## By default, telegraf gathers temps data from all disks detected by the
## hddtemp.
##
## Only collect temps from the selected disks.
##
## A * as the device name will return the temperature values of all disks.
##
# address = "127.0.0.1:7634"
# devices = ["sda", "*"]
# Monitor sensors, requires lm-sensors package
[[inputs.sensors]]
## Remove numbers from field names.
## If true, a field name like 'temp1_input' will be changed to 'temp_input'.
# remove_numbers = true
# Read metrics from storage devices supporting S.M.A.R.T.
[[inputs.smart]]
## Optionally specify the path to the smartctl executable
# path = "/usr/bin/smartctl"
#
## On most platforms smartctl requires root access.
## Setting 'use_sudo' to true will make use of sudo to run smartctl.
## Sudo must be configured to allow the telegraf user to run smartctl
## without a password.
# use_sudo = false
#
## Skip checking disks in this power mode. Defaults to
## "standby" to not wake up disks that have stopped rotating.
## See --nocheck in the man pages for smartctl.
## smartctl version 5.41 and 5.42 have faulty detection of
## power mode and might require changing this value to
## "never" depending on your disks.
# nocheck = "standby"
#
## Gather detailed metrics for each SMART Attribute.
## Defaults to "false"
##
# attributes = false
#
## Optionally specify devices to exclude from reporting.
# excludes = [ "/dev/pass6" ]
#
## Optionally specify devices and device type, if unset
## a scan (smartctl --scan) for S.M.A.R.T. devices will be
## done and all devices found will be included except for the
## ones excluded in excludes.
# devices = [ "/dev/ada0 -d atacam" ]
{% else %}
{% endif %}
\ No newline at end of file
@@ -8,7 +8,7 @@
 #
 # Beispiel: (Auslesen von Passwörtern aus /srv/xyz/secret_pw, Registrierung als Variable secret_pw, Erzeugung mit 24 Zeichen falls nicht vorhanden)
 #
-# - include: ../functions/get_secret.yml
+# - import_playbook: ../functions/get_secret.yml
 #   with_items:
 #     - { path: /srv/xyz/secret_pw, length: 24 }
 #     - { path: /srv/xyz/secret2_pw, length: 12 }
@@ -30,7 +30,7 @@
   apt:
     pkg: openssl
     update_cache: no
-    state: installed
+    state: present
   when: filestat.stat.exists == False

 - name: "{{ item.path | basename }} (generate: length = {{ item.length }})"
@@ -6,5 +6,20 @@ ldap_ip_ext: 10.0.20.2
 # int ist noch ungenutzt / später Replikation in der Zone
 ldap_ip_int: 10.0.20.2

-ldap_base_dn: DC=warpzone,DC=ms
-ldap_readonly_bind_dn: CN=readonly,DC=warpzone,DC=ms
\ No newline at end of file
+ldap_base_dn: dc=warpzone,dc=ms
+ldap_readonly_bind_dn: cn=readonly,dc=warpzone,dc=ms
+
+# Zentrale InfluxDb für Systemmonitoring
+influxdb_sysmon:
+  url: "http://192.168.0.201:18086"
+  db: "influx"
+  user: "influx"
+  password: "influx"
+
+# Zentrale InfluxDb für Snmp Daten
+influxdb_snmp:
+  url: "http://192.168.0.201:28086"
+  db: "influx"
+  user: "influx"
+  password: "influx"
@@ -16,6 +16,9 @@ debian_keys_url:
 - "https://homegear.eu/packages/Release.key"
 - "https://bintray.com/user/downloadSubjectPublicKey?username=openhab"

+# Art des Hosts: physical, vm, docker
+host_type: "physical"
+
 administratorenteam:
 - "user51"
 - "void"
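`host_type` gates both the telegraf install task (`when: host_type == 'physical'`) and the `{% if host_type == 'physical' %}` block in telegraf.conf; a hypothetical VM host would simply override the default in its own host_vars file:

# host_vars for a virtual machine (illustrative)
host_type: "vm"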
@@ -14,6 +14,9 @@ debian_keys_id:
 debian_keys_url:

+# Art des Hosts: physical, vm, docker
+host_type: "physical"
+
 administratorenteam:
 - "void"
 - "sandhome"