path: root/data/templates/telegraf/telegraf.j2
# Generated by /usr/libexec/vyos/conf_mode/service_monitoring_telegraf.py

[agent]
  interval = "15s"
  round_interval = true
  metric_batch_size = 1000
  metric_buffer_limit = 10000
  collection_jitter = "5s"
  flush_interval = "15s"
  flush_jitter = "0s"
  precision = ""
  debug = false
  quiet = false
  logfile = ""
  hostname = "{{ hostname }}"
  omit_hostname = false
{% if azure_data_explorer is vyos_defined %}
### Azure Data Explorer ###
[[outputs.azure_data_explorer]]
  ## The URI property of the Azure Data Explorer resource on Azure
  endpoint_url = "{{ azure_data_explorer.url }}"

  ## The Azure Data Explorer database that the metrics will be ingested into.
  ## The plugin will NOT create this database automatically; it is expected that the database already exists before ingestion.
  database = "{{ azure_data_explorer.database }}"
  metrics_grouping_type = "{{ azure_data_explorer.group_metrics }}"

  ## Name of the single table to store all the metrics (Only needed if metrics_grouping_type is "SingleTable").
{%     if azure_data_explorer.table is vyos_defined and azure_data_explorer.group_metrics == 'SingleTable' %}
  table_name = "{{ azure_data_explorer.table }}"
{%     endif %}
### End Azure Data Explorer ###
{% endif %}
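{#
   Illustrative rendering of the Azure Data Explorer block above, assuming a
   hypothetical cluster URI and database name (placeholders, not taken from
   any real configuration):

   [[outputs.azure_data_explorer]]
     endpoint_url = "https://example.eastus.kusto.windows.net"
     database = "vyos-telemetry"
     metrics_grouping_type = "TablePerMetric"
#}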
{% if influxdb is vyos_defined %}
### InfluxDB2 ###
[[outputs.influxdb_v2]]
  urls = ["{{ influxdb.url }}:{{ influxdb.port }}"]
  insecure_skip_verify = true
  token = "$INFLUX_TOKEN"
  organization = "{{ influxdb.authentication.organization }}"
  bucket = "{{ influxdb.bucket }}"
### End InfluxDB2 ###
{% endif %}
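{#
   Illustrative rendering of the InfluxDB2 block above with a hypothetical
   endpoint, organization and bucket; Telegraf expands $INFLUX_TOKEN from the
   environment at runtime:

   [[outputs.influxdb_v2]]
     urls = ["http://192.0.2.10:8086"]
     insecure_skip_verify = true
     token = "$INFLUX_TOKEN"
     organization = "vyos"
     bucket = "main"
#}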
{% if loki is vyos_defined %}
### Loki ###
[[outputs.loki]]
  ## The domain of Loki
  domain = "{{ loki.url }}:{{ loki.port }}"
{%     if loki.authentication.username is vyos_defined and loki.authentication.password is vyos_defined %}
  ## Basic Authentication
  username = "{{ loki.authentication.username }}"
  password = "{{ loki.authentication.password }}"
{%     endif %}
{%     if loki.metric_name_label is vyos_defined %}
  metric_name_label = "{{ loki.metric_name_label }}"
{%     endif %}
### End Loki ###
{% endif %}
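{#
   Illustrative rendering of the Loki block above, assuming basic
   authentication and a hypothetical endpoint (all values are placeholders):

   [[outputs.loki]]
     domain = "http://192.0.2.20:3100"
     username = "vyos"
     password = "s3cret"
#}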
{% if prometheus_client is vyos_defined %}
### Prometheus ###
[[outputs.prometheus_client]]
  ## Address to listen on
  listen = "{{ prometheus_client.listen_address | bracketize_ipv6 if prometheus_client.listen_address is vyos_defined else '' }}:{{ prometheus_client.port }}"
  metric_version = {{ prometheus_client.metric_version }}
{%     if prometheus_client.authentication.username is vyos_defined and prometheus_client.authentication.password is vyos_defined %}
  ## Use HTTP Basic Authentication
  basic_username = "{{ prometheus_client.authentication.username }}"
  basic_password = "{{ prometheus_client.authentication.password }}"
{%     endif %}
{%     if prometheus_client.allow_from is vyos_defined %}
  ip_range = {{ prometheus_client.allow_from }}
{%     endif %}
### End Prometheus ###
{% endif %}
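{#
   Illustrative rendering of the Prometheus client block above with a
   hypothetical listen address and allow list; the port and CIDR range are
   placeholders only:

   [[outputs.prometheus_client]]
     listen = ":9273"
     metric_version = 2
     ip_range = ["192.0.2.0/24"]
#}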
{% if splunk is vyos_defined %}
### Splunk ###
[[outputs.http]]
  ## URL is the address to send metrics to
  url = "{{ splunk.url }}"
  ## Timeout for HTTP message
  # timeout = "5s"
  ## Use TLS but skip chain & host verification
{%     if splunk.authentication.insecure is vyos_defined %}
  insecure_skip_verify = true
{%     endif %}
  ## Data format to output
  data_format = "splunkmetric"
  ## Provides time, index, source overrides for the HEC
  splunkmetric_hec_routing = true
  ## Additional HTTP headers
  [outputs.http.headers]
    ## Should be set manually to "application/json" for json data_format
    Content-Type = "application/json"
    Authorization = "Splunk {{ splunk.authentication.token }}"
    X-Splunk-Request-Channel = "{{ splunk.authentication.token }}"
### End Splunk ###
{% endif %}
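{#
   Illustrative rendering of the Splunk HEC block above; the endpoint URL and
   token GUID are placeholders, not real credentials:

   [[outputs.http]]
     url = "https://splunk.example.com:8088/services/collector"
     insecure_skip_verify = true
     data_format = "splunkmetric"
     splunkmetric_hec_routing = true
     [outputs.http.headers]
       Content-Type = "application/json"
       Authorization = "Splunk 00000000-0000-0000-0000-000000000000"
       X-Splunk-Request-Channel = "00000000-0000-0000-0000-000000000000"
#}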
[[inputs.cpu]]
  percpu = true
  totalcpu = true
  collect_cpu_time = false
  report_active = false
[[inputs.disk]]
  ignore_fs = ["devtmpfs", "devfs"]
[[inputs.diskio]]
[[inputs.mem]]
[[inputs.net]]
  ignore_protocol_stats = true
[[inputs.nstat]]
[[inputs.system]]
[[inputs.netstat]]
[[inputs.processes]]
[[inputs.kernel]]
[[inputs.interrupts]]
[[inputs.linux_sysctl_fs]]
[[inputs.systemd_units]]
[[inputs.conntrack]]
  files = ["ip_conntrack_count","ip_conntrack_max","nf_conntrack_count","nf_conntrack_max"]
  dirs = ["/proc/sys/net/ipv4/netfilter","/proc/sys/net/netfilter"]
[[inputs.ethtool]]
  interface_include = {{ interfaces_ethernet }}
[[inputs.chrony]]
  dns_lookup = true
[[inputs.internal]]
[[inputs.syslog]]
  server = "unixgram:///run/telegraf/telegraf_syslog.sock"
  best_effort = true
  syslog_standard = "RFC3164"
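{#
   The syslog input only listens on the unixgram socket above; a local log
   daemon has to forward messages into it. A minimal rsyslog sketch using the
   omuxsock output module (assumed legacy-style directives, verify against
   the rsyslog documentation for the installed version):

   $ModLoad omuxsock
   $OMUxSockSocket /run/telegraf/telegraf_syslog.sock
   *.* :omuxsock:
#}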
{% if influxdb is vyos_defined %}
[[inputs.exec]]
  commands = [
    "{{ custom_scripts_dir }}/show_firewall_input_filter.py",
    "{{ custom_scripts_dir }}/show_interfaces_input_filter.py",
    "{{ custom_scripts_dir }}/vyos_services_input_filter.py"
  ]
  timeout = "10s"
  data_format = "influx"
{% endif %}
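{#
   The exec scripts above are expected to print InfluxDB line protocol on
   stdout to match data_format = "influx". A purely illustrative line
   (hypothetical measurement, tag and field names, not the scripts' real
   output):

   interfaces,interface=eth0 rx_bytes=123456i,tx_bytes=654321i
#}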